repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
levilucio/SyVOLT | GM2AUTOSAR_MM/Properties/positive/Himesis/HM5ThenClausePart2CompleteLHS.py | 1 | 17412 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
class HM5ThenClausePart2CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HM5ThenClausePart2CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HM5ThenClausePart2CompleteLHS, self).__init__(name='HM5ThenClausePart2CompleteLHS', num_nodes=5, edges=[])
# Add the edges
self.add_edges([[3, 1], [4, 2], [0, 3], [0, 4]])
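        # Edge layout (inferred from the node attributes set below): the System
        # node (index 0) connects to the two directLink_T association nodes
        # (indices 3, 4), which in turn point at the SoftwareComposition nodes
        # (indices 1, 2).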
# Set the graph attributes
self["mm__"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']
self["MT_constraint__"] = """if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':
return True
return False
"""
self["name"] = """"""
self["GUID__"] = 2625040599713433069
# Set the node attributes
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pivotIn__"] = """element1"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__System"""
self.vs[0]["MT_subtypes__"] = []
self.vs[0]["MT_dirty__"] = False
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = 8091276301164502443
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_pivotIn__"] = """element2"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__SoftwareComposition"""
self.vs[1]["MT_subtypes__"] = []
self.vs[1]["MT_dirty__"] = False
self.vs[1]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["GUID__"] = 7433647115890205089
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__SoftwareComposition"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["GUID__"] = 2267431976090093383
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__directLink_T"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["GUID__"] = 7971122764903685858
self.vs[4]["MT_subtypeMatching__"] = False
self.vs[4]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__directLink_T"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["GUID__"] = 9086016569480805560
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':
return True
return False
| mit | 6,289,185,427,386,588,000 | 48.047887 | 154 | 0.515679 | false |
SimonLane/RTLC | dist/Configuration.py | 1 | 1530 | import ConfigParser, os
def load_config(self):
config = ConfigParser.ConfigParser()
#test for config file
if os.path.exists('config.cfg'):
config.read('config.cfg')
print "Reading config file..."
self.User_root = config.get('Directory', 'User_root', 0)
self.Confocal_out = config.get('Directory', 'Confocal_out', 0)
self.Laser1 = config.get('Lasers', 'Laser1', 0)
self.Laser2 = config.get('Lasers', 'Laser2', 0)
self.Laser3 = config.get('Lasers', 'Laser3', 0)
self.Setup_job = config.get('Jobs', 'Setup_job', 0)
self.Overview_job = config.get('Jobs', 'Overview_job', 0)
self.Zoom_job = config.get('Jobs', 'Zoom_job', 0)
else:
#if error create config file
print "No confog file, creating one now"
config.add_section('Directory')
config.set('Directory', 'User_root', 'D:\\Experiments')
config.set('Directory', 'Confocal_out', 'D:\\CAM_STORE\\FromConfocal')
config.add_section('Jobs')
config.set('Jobs', 'Setup_job', 'setup')
config.set('Jobs', 'Zoom_job', 'zoom')
config.set('Jobs', 'Overview_job', 'OV')
config.add_section('Lasers')
config.set('Lasers', 'Laser1', '488')
config.set('Lasers', 'Laser2', '514')
config.set('Lasers', 'Laser3', '594')
config.set('Lasers', 'Laser_limit', '2')
with open('config.cfg', 'w') as configfile:
config.write(configfile)
load_config(self)
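# Minimal usage sketch (illustrative only; any object with settable attributes
# works as the target):
#
#     class Settings(object):
#         pass
#
#     settings = Settings()
#     load_config(settings)
#     print settings.User_root, settings.Laser1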
| gpl-3.0 | -8,790,537,820,653,622,000 | 40.351351 | 78 | 0.579085 | false |
bitcoinfees/bitcoin-feemodel | feemodel/appdirs.py | 1 | 22491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
# flake8: noqa
__version_info__ = (1, 4, 1)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical user data directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| mit | -1,660,297,250,871,240,400 | 39.232558 | 122 | 0.617163 | false |
galaxy-team/website | newrelic/hooks/framework_bottle.py | 1 | 2494 | import newrelic.api.web_transaction
import newrelic.api.out_function
import newrelic.api.name_transaction
import newrelic.api.error_trace
import newrelic.api.function_trace
def instrument(module):
version = map(int, module.__version__.split('.'))
def out_Bottle_match(result):
callback, args = result
callback = newrelic.api.name_transaction.NameTransactionWrapper(
callback)
callback = newrelic.api.error_trace.ErrorTraceWrapper(callback,
ignore_errors=['bottle:HTTPResponse', 'bottle:RouteReset',
'bottle:HTTPError'])
return callback, args
def out_Route_make_callback(callback):
callback = newrelic.api.name_transaction.NameTransactionWrapper(
callback)
callback = newrelic.api.error_trace.ErrorTraceWrapper(callback,
ignore_errors=['bottle:HTTPResponse', 'bottle:RouteReset',
'bottle:HTTPError'])
return callback
if version >= [0, 10, 0]:
newrelic.api.web_transaction.wrap_wsgi_application(
module, 'Bottle.wsgi')
newrelic.api.out_function.wrap_out_function(
module, 'Route._make_callback', out_Route_make_callback)
elif version >= [0, 9, 0]:
newrelic.api.web_transaction.wrap_wsgi_application(
module, 'Bottle.wsgi')
newrelic.api.out_function.wrap_out_function(
module, 'Bottle._match', out_Bottle_match)
else:
newrelic.api.web_transaction.wrap_wsgi_application(
module, 'Bottle.__call__')
newrelic.api.out_function.wrap_out_function(
module, 'Bottle.match_url', out_Bottle_match)
if hasattr(module, 'SimpleTemplate'):
newrelic.api.function_trace.wrap_function_trace(
module, 'SimpleTemplate.render')
if hasattr(module, 'MakoTemplate'):
newrelic.api.function_trace.wrap_function_trace(
module, 'MakoTemplate.render')
if hasattr(module, 'CheetahTemplate'):
newrelic.api.function_trace.wrap_function_trace(
module, 'CheetahTemplate.render')
if hasattr(module, 'Jinja2Template'):
newrelic.api.function_trace.wrap_function_trace(
module, 'Jinja2Template.render')
if hasattr(module, 'SimpleTALTemplate'):
newrelic.api.function_trace.wrap_function_trace(
module, 'SimpleTALTemplate.render')
| agpl-3.0 | -5,393,824,894,224,822,000 | 36.223881 | 74 | 0.631115 | false |
tomucu/pse | server/webservice/detection_app/tests.py | 1 | 1720 | import json
from django.test import TestCase
from django.test.client import Client
class SimpleTest(TestCase):
def setUp(self):
self.client = Client()
def test_mona_lisa_1(self):
# TODO(sghiaus): Change test images to png.
mona_lisa_1 = open("detection_app/testdata/MonaLisa1.png", "rb")
mona_lisa_1_response = self.client.post('/identify_painting/', {
'image': mona_lisa_1,
'x': '2000',
'y': '1350',
})
self.assertEqual(200, mona_lisa_1_response.status_code)
mona_lisa_1_response_json = json.loads(mona_lisa_1_response.content)
self.assertEqual(u'Mona Lisa', mona_lisa_1_response_json['name'])
def test_mona_lisa_2(self):
mona_lisa_2 = open("detection_app/testdata/MonaLisa2.png", "rb")
mona_lisa_2_response = self.client.post('/identify_painting/', {
'image': mona_lisa_2,
'x': '1200',
'y': '1400',
})
self.assertEqual(200, mona_lisa_2_response.status_code)
mona_lisa_2_response_json = json.loads(mona_lisa_2_response.content)
self.assertEqual(u'MonaLisa', mona_lisa_2_response_json['name'])
def test_paintin_starry_night(self):
starry_night = open("detection_app/testdata/StarryNightPhoto.png", "rb")
starry_night_response = self.client.post('/identify_painting/', {
'image': starry_night,
'x': '750',
'y': '450',
})
self.assertEqual(200, starry_night_response.status_code)
starry_night_response_json = json.loads(starry_night_response.content)
self.assertEqual(u'The Starry Night', starry_night_response_json['name'])
| mit | 7,223,417,858,786,676,000 | 39 | 81 | 0.607558 | false |
BrainBot/advent | 2016/p22/p22.py | 1 | 1776 | import re
from collections import defaultdict
GOAL_CHAR = 'G'
EMPTY_CHAR = '_'
NORMAL_CHAR = '.'
BIG_CHAR = '#'
START = (35,0)
def print_grid(grid, goal, goal_size):
for l, j in enumerate(grid):
for k, i in enumerate(j):
print_char = NORMAL_CHAR
if i[1] > 100:
print_char = BIG_CHAR
elif i[0] - i[1] > goal_size:
print_char = EMPTY_CHAR
if (k,l) == goal:
print_char = GOAL_CHAR
print print_char,
print "\n"
f = open("p22.input")
inputs = [l.strip() for l in f.readlines()]
number_re = re.compile(r'(\d+)')
# remove the headers
inputs = inputs[2:]
nodes_map = {}
nodes_list = []
viable_pairs = defaultdict(list)
num_v_pairs = 0
grid = []
for i in range(30):
grid.append([0 for i in range(36)])
for i in inputs:
# Filesystem Size Used Avail Use%
#/dev/grid/node-x0-y0 91T 66T 25T 72%
x, y, size, used, avail, use_per = number_re.findall(i)
x, y = int(x), int(y)
nodes_list.append((x,y))
node_data = (int(size), int(used))
nodes_map[(x,y)] = node_data
grid[y][x] = node_data
def part1():
    global num_v_pairs
for A in nodes_list:
if nodes_map[A][1] == 0:
continue
for B in nodes_list:
if B == A:
continue
if nodes_map[A][1] <= nodes_map[B][0] - nodes_map[B][1]:
viable_pairs[A].append(B)
num_v_pairs += 1
print num_v_pairs
def part2():
# we could have a BFS to move the empty space next to the goal on the top row
# then do the shuffle algo to move it all along
# or just look at the data and see we can solve it by hand...
print_grid(grid, START, nodes_map[START][1])
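# Sketch of the BFS idea mentioned in part2 (illustrative only; bfs_moves and
# `walls` are names introduced here, not used elsewhere in this file). It counts
# the moves needed to walk the empty slot from `start` to `target`, treating the
# oversized '#' nodes as walls.
def bfs_moves(start, target, walls):
    from collections import deque
    width, height = len(grid[0]), len(grid)
    seen = {start}
    queue = deque([(start, 0)])
    while queue:
        (x, y), dist = queue.popleft()
        if (x, y) == target:
            return dist
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (x + dx, y + dy)
            if 0 <= nxt[0] < width and 0 <= nxt[1] < height \
                    and nxt not in walls and nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, dist + 1))
    return None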
| mit | 5,136,835,176,271,946,000 | 23.666667 | 81 | 0.538851 | false |
broodhand/ntes | tmp_backup/db/mysql/backup/sqlalchemydb.py | 1 | 1237 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 11:02:14 2016
@author:Zhao Cheng
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import configparser
class SqlalchemyDB(object):
def __init__(self, sqlalchemycfg='sqlalchemydb.cfg'):
self.sqlalchemycfg = sqlalchemycfg
config = configparser.ConfigParser()
with open(self.sqlalchemycfg, 'r') as cfgfile:
config.read_file(cfgfile)
self.cfghost = config.get('SQLALCHEMY', 'host')
self.cfgport = config.get('SQLALCHEMY', 'port')
self.cfgdatabase = config.get('SQLALCHEMY', 'db')
self.cfguser = config.get('SQLALCHEMY', 'user')
self.cfgpassword = config.get('SQLALCHEMY', 'password')
self.cfgtype = config.get('SQLALCHEMY', 'type')
self.cfgdriver = config.get('SQLALCHEMY', 'driver')
self.constr = '%s+%s://%s:%s@%s:%s/%s' % (
self.cfgtype, self.cfgdriver, self.cfguser, self.cfgpassword, self.cfghost, self.cfgport, self.cfgdatabase)
    # Get a SQLAlchemy session
def getsession(self):
engine = create_engine(self.constr)
Dbsession = sessionmaker(bind=engine)
return Dbsession()
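    # Usage sketch (illustrative; requires a sqlalchemydb.cfg file providing the
    # [SQLALCHEMY] section read in __init__ above):
    #
    #     db = SqlalchemyDB()
    #     session = db.getsession()
    #     try:
    #         ...  # query or add objects here
    #         session.commit()
    #     finally:
    #         session.close()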
| gpl-3.0 | -4,475,134,883,985,731,600 | 36.30303 | 119 | 0.635256 | false |
kevin-coder/tensorflow-fork | tensorflow/python/saved_model/load_test.py | 1 | 48380 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import tempfile
from absl.testing import parameterized
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import tf_inspect
@parameterized.named_parameters(
dict(testcase_name="ReloadOnce", cycles=1),
dict(testcase_name="ReloadTwice", cycles=2),
dict(testcase_name="ReloadThrice", cycles=3))
class LoadTest(test.TestCase, parameterized.TestCase):
def cycle(self, obj, cycles, signatures=None):
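    # Round-trip helper: save `obj` into a fresh temporary SavedModel directory
    # and reload it, repeating `cycles` times; returns the final loaded object.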
to_save = obj
# TODO(vbardiovsky): It would be nice if exported protos reached a fixed
# point w.r.t. saving/restoring, ideally after 2nd saving.
for _ in range(cycles):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(to_save, path, signatures)
loaded = load.load(path)
to_save = loaded
return loaded
def test_structure_import(self, cycles):
root = tracking.AutoTrackable()
root.dep_one = tracking.AutoTrackable()
root.dep_two = tracking.AutoTrackable()
root.dep_two.dep = tracking.AutoTrackable()
root.dep_three = root.dep_two.dep
imported = self.cycle(root, cycles)
self.assertIs(imported.dep_three, imported.dep_two.dep)
self.assertIsNot(imported.dep_one, imported.dep_two)
def test_variables(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1., trainable=True)
root.v2 = variables.Variable(2., trainable=False)
imported = self.cycle(root, cycles)
self.assertEqual(imported.v1.numpy(), 1.0)
self.assertTrue(imported.v1.trainable)
self.assertEqual(imported.v2.numpy(), 2.0)
self.assertFalse(imported.v2.trainable)
def test_capture_variables(self, cycles):
root = tracking.AutoTrackable()
root.weights = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.weights * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
imported = self.cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
imported.weights.assign(4.0)
self.assertEqual(8., imported.f(constant_op.constant(2.)).numpy())
def test_control_outputs(self, cycles):
exported = tracking.AutoTrackable()
exported.v = variables.Variable(1.)
exported.f = def_function.function(
lambda: exported.v.assign(2., name="should_be_control_output"))
exported_graph = exported.f.get_concrete_function().graph
self.assertIn(
exported_graph.get_operation_by_name("should_be_control_output"),
exported_graph.control_outputs)
imported = self.cycle(exported, cycles)
# Calling get_concrete_function wraps in a second call operation; we want to
# inspect the original function body for the control output; digging into
# graph.as_graph_def() and its FunctionDefLibrary is another option.
imported_concrete, = imported.f._concrete_functions
imported_graph = imported_concrete.graph
self.assertIn(
imported_graph.get_operation_by_name("should_be_control_output"),
imported_graph.control_outputs)
def _make_asset(self, contents):
filename = tempfile.mktemp(prefix=self.get_temp_dir())
with open(filename, "w") as f:
f.write(contents)
return filename
def test_assets(self, cycles):
file1 = self._make_asset("contents 1")
file2 = self._make_asset("contents 2")
root = tracking.AutoTrackable()
root.asset1 = tracking.TrackableAsset(file1)
root.asset2 = tracking.TrackableAsset(file2)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir)
file_io.delete_file(file1)
file_io.delete_file(file2)
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
imported = load.load(load_dir)
with open(imported.asset1.asset_path.numpy(), "r") as f:
self.assertEqual("contents 1", f.read())
with open(imported.asset2.asset_path.numpy(), "r") as f:
self.assertEqual("contents 2", f.read())
def test_capture_assets(self, cycles):
root = tracking.AutoTrackable()
root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
imported = self.cycle(root, cycles)
original_output = root.f().numpy()
imported_output = imported.f().numpy()
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_capture_assets_in_graph(self, cycles):
root = tracking.AutoTrackable()
root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
original_output = root.f().numpy()
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default():
imported = load.load(path)
imported_tensor = imported.f()
with monitored_session.MonitoredSession() as sess:
imported_output = sess.run(imported_tensor)
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_dedup_assets(self, cycles):
vocab = self._make_asset("contents")
root = tracking.AutoTrackable()
root.asset1 = tracking.TrackableAsset(vocab)
root.asset2 = tracking.TrackableAsset(vocab)
imported = self.cycle(root, cycles)
self.assertEqual(imported.asset1.asset_path.numpy(),
imported.asset2.asset_path.numpy())
def test_implicit_input_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
# Add two traces.
root.f(constant_op.constant(1.))
root.f(constant_op.constant(1))
imported = self.cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
def test_explicit_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
imported = self.cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_explicit_save_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
imported = self.cycle(
root, cycles, {
"f":
root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
})
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_nested_functions(self, cycles):
f = def_function.function(
lambda x: x*2.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
g = def_function.function(
lambda x: f(x) + 1.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.AutoTrackable()
root.g = g
imported = self.cycle(root, cycles)
imported.g(constant_op.constant([1.0]))
def test_function_with_default_bool_input(self, cycles):
def func(x, training=False):
if training:
return 2 * x
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
def test_function_with_default_none_input(self, cycles):
def func(x, dtype=None):
if dtype:
return array_ops.zeros(shape=x.shape, dtype=dtype)
else:
return array_ops.zeros(shape=x.shape, dtype=dtypes.float32)
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertAllEqual([0.0, 0.0, 0.0],
root.f(constant_op.constant([1, 2, 3])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0],
root.f(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0, 0.0],
root.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([0, 0, 0],
root.f(
constant_op.constant([1.0, 2.0, 3.0]),
dtype=dtypes.int32).numpy())
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertEqual(4, len(concrete_functions))
imported = self.cycle(root, cycles)
self.assertAllEqual([0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3]),
None).numpy())
self.assertAllEqual([0.0, 0.0, 0.0],
imported.f(constant_op.constant([1.0, 2.0,
3.0])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([0, 0, 0],
imported.f(
constant_op.constant([1.0, 2.0, 3.0]),
dtype=dtypes.int32).numpy())
def test_function_no_return(self, cycles):
class TrackableWithOneVariable(tracking.AutoTrackable):
def __init__(self, initial_value=0.0):
super(TrackableWithOneVariable, self).__init__()
self.variable = variables.Variable(initial_value)
@def_function.function
def increase(self, by=1.0):
self.variable.assign_add(by)
obj = TrackableWithOneVariable(5.0)
obj.increase(constant_op.constant(10.0))
self.assertEqual(15.0, obj.variable.numpy())
obj.increase()
self.assertEqual(16.0, obj.variable.numpy())
imported = self.cycle(obj, cycles)
imported.increase(constant_op.constant(10.0))
self.assertEqual(26.0, imported.variable.numpy())
imported.increase(constant_op.constant(1.0))
self.assertEqual(27.0, imported.variable.numpy())
def test_structured_inputs(self, cycles):
def func(x, training=True):
# x is a nested structure, we care about one particular tensor.
_, (a, b) = x
if training:
return 2 * a["a"] + b
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
y = constant_op.constant(11)
input1 = [6, ({"a": x}, y)]
input2 = [7, ({"a": x}, y)] # Not compatible with input1 signature.
input3 = [6, ({"a": y}, x)] # Compatible with input1 signature.
# Note: by only calling f(input1) before serialization, only inputs with
# matching signature will be valid on the loaded model.
self.assertEqual(31, root.f(input1).numpy())
imported = self.cycle(root, cycles)
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported.f(input2)
self.assertEqual(31, imported.f(input1).numpy())
self.assertEqual(32, imported.f(input3).numpy())
def test_structured_output(self, cycles):
# Use fields with non-alphabetical order
named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])
def func(input1, input2):
named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
return [named_tuple, input2, {"x": 0.5}]
root = tracking.AutoTrackable()
root.f = def_function.function(func)
result = root.f(constant_op.constant(2), constant_op.constant(3))
self.assertEqual(5, result[0].a.numpy())
self.assertEqual(6, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(3, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
imported = self.cycle(root, cycles)
result = imported.f(constant_op.constant(2), constant_op.constant(5))
self.assertEqual(7, result[0].a.numpy())
self.assertEqual(10, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(5, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
def test_optimizer(self, cycles):
class _HasOptimizer(module.Module):
def __init__(self):
super(_HasOptimizer, self).__init__()
self.layer = core.Dense(1)
self.optimizer = adam.Adam(0.01)
@def_function.function
def __call__(self, x):
return self.layer(x)
@def_function.function
def train(self, x, y):
with backprop.GradientTape() as tape:
predicted = self(x)
loss = math_ops.reduce_sum(math_ops.abs(y - predicted))
train_vars = self.layer.trainable_variables
grads = tape.gradient(loss, train_vars)
self.optimizer.apply_gradients(zip(grads, train_vars))
root = _HasOptimizer()
train_input = dict(x=constant_op.constant([[1.]]),
y=constant_op.constant([[2.]]))
root.train(**train_input)
imported = self.cycle(root, cycles)
self.assertAllClose(root.optimizer.learning_rate.numpy(),
imported.optimizer.learning_rate.numpy())
self.assertAllClose(root(constant_op.constant([[-0.5]])),
imported(constant_op.constant([[-0.5]])))
root.train(**train_input)
imported.train(**train_input)
self.assertAllClose(root(constant_op.constant([[-0.5]])),
imported(constant_op.constant([[-0.5]])))
def test_positional_arguments(self, cycles):
def func(x, training=False, abc=7.1, defg=7.7):
del abc
if training:
return 2 * x
if defg == 7:
return 6
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
def test_additional_kwargs(self, cycles):
def func(x, training=False, **options):
del options
if training:
return 2 * x
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
self.assertEqual(7, root.f(x, learning_rate=0.5, epochs=3).numpy())
imported = self.cycle(root, cycles)
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call.*"):
imported.f(x, learning_rate=0.5, epochs=4)
self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy())
def test_member_function(self, cycles):
class TrackableWithMember(tracking.AutoTrackable):
def __init__(self):
super(TrackableWithMember, self).__init__()
self._some_value = 20
@def_function.function
def f(self, x, training=False):
if training:
return 2 * x
else:
return 7 + self._some_value
root = TrackableWithMember()
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self, cycles):
class M(tracking.AutoTrackable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.)
return x * self.var
m = M()
self.cycle(m, cycles)
self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy())
def test_basic_backprop(self, cycles):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
g = def_function.function(
lambda x: x*weight + bias,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = g
imported = self.cycle(root, cycles)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 1.0])
def test_nested_backprop(self, cycles):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
# Note: this function gets called from other function defs via a
# "PartitionedCall" op node.
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)])
def mul(x, y):
return x * y
# Note: this function gets called from other function defs via a
# "StatefulPartitionedCall" op node.
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def f(x):
return mul(weight.read_value(), x)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def g(x):
return f(x) + bias,
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def h(x):
return g(x) + bias,
root = tracking.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = h
imported = self.cycle(root, cycles)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 2.0])
def test_callable(self, cycles):
class M1(tracking.AutoTrackable):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def __call__(self, x):
return x
root = tracking.AutoTrackable()
root.m1 = M1()
root.m2 = tracking.AutoTrackable()
root.m2.__call__ = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
imported = self.cycle(root, cycles)
x = constant_op.constant(1.0)
self.assertTrue(callable(imported.m1))
self.assertAllEqual(root.m1(x), imported.m1(x))
    # Note: `root.m2` was not callable because the `__call__` attribute was set
    # on the instance rather than on the class. But after a serialization cycle
    # it becomes callable.
self.assertTrue(callable(imported.m2))
self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
# Verify that user objects without `__call__` attribute are not callable.
self.assertFalse(callable(imported))
def test_chain_callable(self, cycles):
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
root = tracking.AutoTrackable()
root.__call__ = tracking.AutoTrackable()
root.__call__.__call__ = tracking.AutoTrackable()
root.__call__.__call__.__call__ = func
imported = self.cycle(root, cycles)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
def test_load_in_graph_mode(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default():
imported = load.load(path)
var_v1 = imported.v1
output = imported.f(constant_op.constant(2.))
with monitored_session.MonitoredSession() as sess:
self.assertEqual(1.0, sess.run(var_v1))
self.assertEqual(4.0, sess.run(output))
def test_load_in_func_graph(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
closure = tracking.AutoTrackable()
@def_function.function
def func(x):
if not hasattr(closure, "model"):
closure.model = load.load(path)
return closure.model.f(x)
inputs = constant_op.constant(2.)
self.assertEqual(4.0, func(inputs).numpy())
def test_soft_matching(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertEqual(1, len(concrete_functions))
imported = self.cycle(root, cycles)
with self.assertRaisesRegexp(ValueError, "Python inputs incompatible"):
# We cannot call the function with a constant of shape ().
imported.f(constant_op.constant(2)).numpy()
# TODO(vbardiovsky): When classes are revived with input_signatures, we
# should also check that the calls below are not generating any more
# concrete functions.
self.assertAllEqual([2, 4, 6, 8],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([2, 4, 6],
imported.f(constant_op.constant([1, 2, 3])).numpy())
def test_get_concrete_function(self, cycles):
@def_function.function
def func(x, training=False):
if training:
return 2 * x
else:
return 3 * x
func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True)
func.get_concrete_function(tensor_spec.TensorSpec([None], dtypes.float32))
root = tracking.AutoTrackable()
root.f = func
imported = self.cycle(root, cycles)
concrete = imported.f.get_concrete_function(
training=True, x=tensor_spec.TensorSpec([None], dtypes.int32))
self.assertAllEqual([2, 4, 6, 8],
concrete(x=constant_op.constant([1, 2, 3, 4])).numpy())
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True)
def test_concrete_function(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertAllEqual([2, 4, 6, 8],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([2, 4, 6],
imported.f(constant_op.constant([1, 2, 3])).numpy())
def test_concrete_function_arg_names(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertAllEqual([2, 4, 6],
imported.f(x=constant_op.constant([1, 2, 3])).numpy())
def test_concrete_function_no_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(constant_op.constant([1]))
self.assertAllEqual([4], root.f(constant_op.constant([2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertAllEqual([6],
imported.f(constant_op.constant([3])).numpy())
def test_concrete_function_backprop(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.float32)])
def func(x):
return x ** 2.
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
def _compute_gradient(function):
with backprop.GradientTape() as tape:
inp = constant_op.constant(1.)
tape.watch(inp)
output = function(inp)
return tape.gradient(output, inp)
self.assertEqual(2., _compute_gradient(root.f).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertEqual(2., _compute_gradient(imported.f).numpy())
def test_revived_concrete_function_kwargs(self, cycles):
@def_function.function
def func(x, y):
return x * (y + 1.)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.float32))
self.assertEqual(8., root.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertEqual(8., imported.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
def test_revived_concrete_function_tensorspec_kwargs(self, cycles):
@def_function.function
def func(*args):
x, y = args
return x * (y + 1.)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32, name="x"),
tensor_spec.TensorSpec([], dtypes.float32, name="y"))
self.assertEqual(8., root.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
imported = self.cycle(root, cycles, signatures={})
self.assertEqual(8., imported.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
def test_concrete_function_variable_argument(self, cycles):
# TODO(allenl): Fix variables in input signatures.
self.skipTest("Need to fix encoding of variables in inputs signatures")
capture = variables.Variable(0)
@def_function.function
def func(v):
v.assign_add(1)
capture.assign_sub(1)
vsave = variables.Variable(1)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(vsave)
root.capture = capture
self.assertEqual(1, vsave.numpy())
root.f(vsave)
self.assertEqual(2, vsave.numpy())
self.assertEqual(-1, capture.numpy())
imported = self.cycle(root, cycles)
vload = variables.Variable(1)
imported.f(vload)
self.assertEqual(2, vload.numpy())
imported.f(v=vload)
self.assertEqual(3, vload.numpy())
self.assertEqual(-3, imported.capture.numpy())
self.assertEqual(-1, capture.numpy())
def test_function_and_component(self, cycles):
@def_function.function
def func(v):
return v + 1
root = tracking.AutoTrackable()
root.func = func
root.concrete_func = func.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.int32))
one = constant_op.constant(1)
self.assertEqual(2, root.func(one).numpy())
self.assertEqual(2, root.concrete_func(one).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(2, imported.func(one).numpy())
self.assertEqual(2, imported.concrete_func(one).numpy())
def test_dict(self, cycles):
root = tracking.AutoTrackable()
root.variables = dict(a=variables.Variable(1.))
root.variables["b"] = variables.Variable(2.)
root.variables["c"] = 1
root.funcs = dict(
a=def_function.function(lambda: constant_op.constant(100.)))
root.funcs["conc"] = root.funcs["a"].get_concrete_function()
imported = self.cycle(root, cycles)
self.assertEqual(1., imported.variables["a"].numpy())
self.assertEqual(2., imported.variables["b"].numpy())
self.assertEqual(set(["a", "b"]), set(imported.variables.keys()))
self.assertEqual(100., imported.funcs["a"]().numpy())
self.assertEqual(100., imported.funcs["conc"]().numpy())
def test_list(self, cycles):
root = tracking.AutoTrackable()
root.variables = [variables.Variable(1.)]
root.variables.append(1)
root.variables.append(variables.Variable(3.))
imported = self.cycle(root, cycles)
self.assertEqual(1., imported.variables[0].numpy())
self.assertEqual(3., imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertEqual(3, len(imported.variables))
def test_functions_list(self, cycles):
root = tracking.AutoTrackable()
v1 = variables.Variable(1.)
root.losses = [def_function.function(lambda: math_ops.reduce_sum(v1 ** 2))]
root.variables = [v1]
@def_function.function
def _v2_loss():
if len(root.variables) == 1:
v2 = variables.Variable(2.)
root.variables.append(v2)
return math_ops.reduce_sum(root.variables[1] ** 2)
root.losses.append(_v2_loss)
self.assertAllClose([1., 4.], [loss() for loss in root.losses])
imported = self.cycle(root, cycles)
self.assertAllClose([1., 4.], [loss() for loss in imported.losses])
imported.variables[0].assign(3.)
imported.variables[1].assign(4.)
self.assertAllClose([9., 16.], [loss() for loss in imported.losses])
def test_captured_constant(self, cycles):
const = array_ops.zeros([100])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda: const + 1.)
root.g = def_function.function(lambda: const + 2.)
self.assertAllClose(array_ops.ones([100]), root.f())
self.assertAllClose(2. * array_ops.ones([100]), root.g())
imported = self.cycle(root, cycles)
self.assertAllClose(array_ops.ones([100]), imported.f())
self.assertAllClose(2. * array_ops.ones([100]), imported.g())
# TODO(b/123408994): Use the public get_concrete_function.
f_concrete = imported.f._list_all_concrete_functions_for_serialization()[0]
g_concrete = imported.g._list_all_concrete_functions_for_serialization()[0]
self.assertLen(f_concrete.captured_inputs, 1)
self.assertLen(g_concrete.captured_inputs, 1)
# We should be using the same captured EagerTensor in both functions, not
# duplicating the constant.
self.assertIs(f_concrete.captured_inputs[0],
g_concrete.captured_inputs[0])
def test_functions_accessed_once(self, cycles):
class Exported(tracking.AutoTrackable):
def __init__(self):
self._counter = 0
@property
def make_func(self):
@def_function.function
def f():
return constant_op.constant(self._counter)
f.get_concrete_function() # force a trace
self._counter += 1
return f
exported = Exported()
imported = self.cycle(exported, cycles)
self.assertEqual(0, imported.make_func().numpy())
self.assertEqual(1, exported.make_func().numpy())
def test_overwritten_signatures_error(self, cycles):
exported = tracking.AutoTrackable()
exported.f = def_function.function(lambda: constant_op.constant(1.))
imported = self.cycle(
exported, cycles,
signatures={"key": exported.f.get_concrete_function()})
self.assertEqual(1., imported.signatures["key"]()["output_0"].numpy())
imported.signatures = {"key1": imported.signatures["key"]}
with self.assertRaisesRegexp(ValueError, "signatures"):
save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir()))
def test_signature_loading(self, cycles):
class Exported(tracking.AutoTrackable):
def __init__(self):
self.v = variables.Variable(3.)
@def_function.function
def do(self, x):
return self.v * x
exported = Exported()
imported = self.cycle(
exported,
cycles=1,
signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32)))
for _ in range(cycles - 1):
imported = self.cycle(imported, cycles=1, signatures=imported.signatures)
self.assertEqual(["serving_default"], list(imported.signatures.keys()))
imported_function = imported.signatures["serving_default"]
two = constant_op.constant(2.)
self.assertEqual(6., imported_function(x=two)["output_0"].numpy())
imported.v.assign(4.)
self.assertEqual(8., imported_function(x=two)["output_0"].numpy())
self.assertEqual(8., imported_function(two)["output_0"].numpy())
with self.assertRaises(TypeError):
# The signatures mapping is immutable
imported.signatures["random_key"] = 3
def test_multiple_argument_signatures_no_positional(self, cycles):
class Exported(tracking.AutoTrackable):
@def_function.function
def do(self, x, y):
return x + y
exported = Exported()
imported = self.cycle(
exported, cycles=1, signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)))
for _ in range(cycles - 1):
imported = self.cycle(imported, cycles=1, signatures=imported.signatures)
with self.assertRaises(TypeError):
imported.signatures["serving_default"](
constant_op.constant(1.),
y=constant_op.constant(2.))
self.assertEqual(
{"output_0": 3.},
self.evaluate(imported.signatures["serving_default"](
x=constant_op.constant(1.),
y=constant_op.constant(2.))))
def _make_model_with_tables(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1_initializer = lookup_ops.KeyValueTensorInitializer(keys, values)
table1 = lookup_ops.HashTable(table1_initializer, default_val)
table2_file = self._make_asset("test\nfoo\nbrain\n")
table2_initializer = lookup_ops.TextFileIdTableInitializer(table2_file)
table2 = lookup_ops.HashTable(table2_initializer, default_val)
def _make_lookup_function(table):
signature = [tensor_spec.TensorSpec(None, dtypes.string)]
return def_function.function(input_signature=signature)(
lambda x: table.lookup(x)) # pylint: disable=unnecessary-lambda
root = tracking.AutoTrackable()
root.table1 = table1
root.lookup1 = _make_lookup_function(table1)
root.table2 = table2
root.lookup2 = _make_lookup_function(table2)
return root
def test_table(self, cycles):
root = self._make_model_with_tables()
imported = self.cycle(root, cycles, signatures={})
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
self.assertAllEqual([0, -1, -1, 2], imported.lookup1(keys).numpy())
self.assertAllEqual([2, 0, 1, -1], imported.lookup2(keys).numpy())
def test_table_in_graph(self, cycles):
root = self._make_model_with_tables()
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
imported = self.cycle(root, 1)
with ops.Graph().as_default():
imported = load.load(path)
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
output1 = imported.lookup1(keys)
output2 = imported.lookup2(keys)
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual([0, -1, -1, 2], sess.run(output1))
self.assertAllEqual([2, 0, 1, -1], sess.run(output2))
  def test_preserve_argspec(self, cycles):
def f(a, b, c): # pylint: disable=unused-argument
return None
original_fullargspec = tf_inspect.getfullargspec(f)
root = tracking.AutoTrackable()
root.f = def_function.function(f)
imported = self.cycle(root, cycles)
restored_fullargspec = tf_inspect.getfullargspec(imported.f)
self.assertEqual(original_fullargspec, restored_fullargspec)
def test_canonicalize_inputs(self, cycles):
@def_function.function(autograph=False)
def func(a=1, b=2, c=3, training=True):
if training:
return [a, b, c, training]
else:
return [c, b, a, training]
# TODO(b/123501567): Work-around to trigger generic traces of a function
# with extra non tensor args.
signature = 3*[tensor_spec.TensorSpec(None, dtypes.float32)]
@def_function.function(input_signature=signature)
def trigger(a, b, c):
func(a, b, c, True)
func(a, b, c, False)
trigger.get_concrete_function()
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
with self.assertRaisesRegexp(ValueError,
"Could not find matching function"):
root.f(["hello", 1.0])
def test_prefer_specific_trace(self, cycles):
@def_function.function(autograph=False)
def func(a):
if isinstance(a, int):
return a
else:
return a + 1
self.assertAllEqual(2, func(2).numpy())
self.assertAllEqual(3, func(constant_op.constant(2)).numpy())
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
self.assertAllEqual(2, root.f(2).numpy())
self.assertAllEqual(4, root.f(3).numpy())
self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy())
self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy())
def test_partial(self, cycles):
# TODO(b/124441704): Figure out the story for FunctionSpec vs partial.
self.skipTest("Partial does not work for serialization.")
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.ones([1])))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(), [1.0])
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0])
def test_partial_with_non_tensor_defaults(self, cycles):
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, y=5))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional(self, cycles):
# TODO(b/124441704): Figure out the story for FunctionSpec vs partial.
self.skipTest("Partial does not work for serialization.")
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, constant_op.constant(5)))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_passed_fn_as_default(self, cycles):
# TODO(b/124441704): Figure out the story for FunctionSpec vs partial.
self.skipTest("Partial does not work for serialization.")
def f(x, y):
return x(3) + y
def my_func(a):
return 2 * a
func = def_function.function(functools.partial(f, my_func))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
root = self.cycle(root, cycles)
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
def test_convert_to_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return x
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
self.assertEqual([2], root.f([2]).numpy())
def test_named_tuple(self, cycles):
class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])):
pass
@def_function.function
def f(x):
return x.a + x.b
f.get_concrete_function(
NamedTupleType(
a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"),
b=tensor_spec.TensorSpec(None, dtypes.float32, name="b")))
obj = tracking.AutoTrackable()
obj.__call__ = f
imported = self.cycle(obj, cycles)
self.assertAllClose(3.,
imported(NamedTupleType(a=constant_op.constant(1.),
b=constant_op.constant(2.))))
def test_extra_args(self, cycles):
@def_function.function
def f(x):
return math_ops.add(x["a"], 1.)
# Trigger a trace.
f({"a": constant_op.constant(2.0)})
obj = tracking.AutoTrackable()
obj.__call__ = f
imported = self.cycle(obj, cycles)
self.assertEqual(4.0, imported({"a": 3.0}).numpy())
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported({"a": 2.0, "b": 3.0})
def test_shapes_available(self, cycles):
@def_function.function(input_signature=[
tensor_spec.TensorSpec([None, 3], dtypes.int32),
tensor_spec.TensorSpec([None, 2], dtypes.int32)
])
def func(x, y):
return array_ops.concat([x, y], axis=1)
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
imported_graph = root.f.get_concrete_function().graph
input_x, input_y = imported_graph.inputs
self.assertEqual([None, 3], input_x.shape.as_list())
self.assertEqual([None, 2], input_y.shape.as_list())
output, = imported_graph.outputs
self.assertEqual([None, 5], output.shape.as_list())
signature = root.signatures["serving_default"]
self.assertEqual(
[None, 3], signature.inputs[0].shape.as_list())
self.assertEqual(
[None, 2], signature.inputs[1].shape.as_list())
self.assertEqual(
[None, 5], signature.outputs[0].shape.as_list())
def test_dense_features_layer(self, cycles):
columns = [feature_column_v2.numeric_column("x"),
feature_column_v2.numeric_column("y")]
layer = feature_column_v2.DenseFeatures(columns)
model = sequential.Sequential([layer])
model_input = {"x": constant_op.constant([[1.]]),
"y": constant_op.constant([[2.]])}
self.assertAllClose([[1., 2.]], model.predict(model_input))
loaded = self.cycle(model, cycles)
output, = loaded._default_save_signature(model_input).values()
self.assertAllClose([[1., 2.]], output)
signature_output, = loaded.signatures["serving_default"](
**model_input).values()
self.assertAllClose([[1., 2.]], signature_output)
def test_dense_features_layer_fit(self, cycles):
columns = [feature_column_v2.numeric_column("x")]
model = sequential.Sequential(
[feature_column_v2.DenseFeatures(columns),
core.Dense(1)])
model_input = {"x": constant_op.constant([[1.]])}
model.compile(optimizer="adam", loss="mse")
model.fit(model_input, constant_op.constant([[3.]]))
loaded = self.cycle(model, cycles)
loaded._default_save_signature(model_input)
loaded.signatures["serving_default"](**model_input)
class SingleCycleTests(test.TestCase, parameterized.TestCase):
def test_load_with_tags(self):
root = tracking.AutoTrackable()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with self.assertRaises(ValueError):
load.load(path, tags=[tag_constants.EVAL])
load.load(path, tags=[tag_constants.SERVING])
load.load(path, tags=tag_constants.SERVING)
load.load(path, tags=set([tag_constants.SERVING]))
def test_docstring_examples(self):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
exported = util.Checkpoint(v=variables.Variable(3.))
exported.f = def_function.function(
lambda x: exported.v * x,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)])
save.save(exported, path)
imported = load.load(path)
self.assertEqual(3., imported.v.numpy())
self.assertEqual(6., imported.f(x=constant_op.constant(2.)).numpy())
save.save(exported, path, exported.f.get_concrete_function())
imported = load.load(path)
f = imported.signatures["serving_default"]
self.assertAllEqual(
[[-3.]],
f(x=constant_op.constant([[-1.]]))["output_0"].numpy())
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,310,007,591,043,251,000 | 34.863603 | 116 | 0.648698 | false |
alexforencich/hdg2000 | fpga/lib/axis/tb/test_axis_frame_fifo_64.py | 1 | 13782 | #!/usr/bin/env python
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
module = 'axis_frame_fifo_64'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
def dut_axis_frame_fifo_64(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser):
if os.system(build_cmd):
raise Exception("Error running build command")
return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tkeep=input_axis_tkeep,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tkeep=output_axis_tkeep,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tuser=output_axis_tuser)
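# Cosimulation note: "vvp -m myhdl" assumes the MyHDL VPI plugin for Icarus
# Verilog (myhdl.vpi) is available on vvp's module search path; the keyword
# arguments above map the Python signals onto the ports of the compiled
# test_axis_frame_fifo_64 testbench.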
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[64:])
input_axis_tkeep = Signal(intbv(0)[8:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tuser = Signal(bool(0))
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[64:])
output_axis_tkeep = Signal(intbv(0)[8:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tuser = Signal(bool(0))
# sources and sinks
source_queue = Queue()
source_pause = Signal(bool(0))
sink_queue = Queue()
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource(clk,
rst,
tdata=input_axis_tdata,
tkeep=input_axis_tkeep,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tuser=input_axis_tuser,
fifo=source_queue,
pause=source_pause,
name='source')
sink = axis_ep.AXIStreamSink(clk,
rst,
tdata=output_axis_tdata,
tkeep=output_axis_tkeep,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tuser=output_axis_tuser,
fifo=sink_queue,
pause=sink_pause,
name='sink')
# DUT
dut = dut_axis_frame_fifo_64(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
yield clk.posedge
print("test 1: test packet")
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: longer packet")
current_test.next = 2
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield clk.posedge
print("test 3: test packet with pauses")
current_test.next = 3
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256)))
source_queue.put(test_frame)
yield clk.posedge
yield delay(64)
yield clk.posedge
source_pause.next = True
yield delay(32)
yield clk.posedge
source_pause.next = False
yield delay(64)
yield clk.posedge
sink_pause.next = True
yield delay(32)
yield clk.posedge
sink_pause.next = False
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 4: back-to-back packets")
current_test.next = 4
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield output_axis_tlast.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 5: alternate pause source")
current_test.next = 5
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 6: alternate pause sink")
current_test.next = 6
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 7: tuser assert")
current_test.next = 7
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
test_frame.user = 1
source_queue.put(test_frame)
yield clk.posedge
yield delay(1000)
assert sink_queue.empty()
yield clk.posedge
print("test 8: single packet overflow")
current_test.next = 8
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(256))*2)
source_queue.put(test_frame)
yield clk.posedge
yield delay(10000)
assert sink_queue.empty()
yield delay(100)
raise StopSimulation
return dut, source, sink, clkgen, check
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| mit | -2,845,843,843,015,676,000 | 31.971292 | 117 | 0.500508 | false |
ternaris/marv-robotics | code/marv/marv/utils.py | 1 | 3364 | # Copyright 2016 - 2018 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import os
import re
import sys
import time
from datetime import datetime, timedelta
from datetime import tzinfo as tzinfo_base
from itertools import islice
from marv_api.utils import NOTSET
def chunked(iterable, chunk_size):
itr = iter(iterable)
return iter(lambda: tuple(islice(itr, chunk_size)), ())
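# Usage sketch (not part of the original module): list(chunked(range(5), 2))
# evaluates to [(0, 1), (2, 3), (4,)] -- the last chunk may be shorter.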
def findfirst(predicate, iterable, default=NOTSET):
try:
return next(x for x in iterable if predicate(x))
except StopIteration:
if default is not NOTSET:
return default
raise ValueError('No item matched predicate!')
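# e.g. findfirst(lambda x: x > 2, [1, 2, 3, 4]) returns 3; without a match it
# raises ValueError unless a `default` is given:
# findfirst(lambda x: x > 9, [1, 2], default=None) returns None.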
def mtime(path):
"""Wrap os.stat() st_mtime for ease of mocking."""
return os.stat(path).st_mtime
def stat(path):
"""Wrap os.stat() for ease of mocking.""" # noqa: D402
# TODO: https://github.com/PyCQA/pydocstyle/issues/284
return os.stat(path)
def walk(path):
"""Wrap os.walk() for ease of mocking.""" # noqa: D402
# TODO: https://github.com/PyCQA/pydocstyle/issues/284
return os.walk(path)
def now():
"""Wrap time.time() for ease of mocking."""
return time.time()
def parse_filesize(string):
val, unit = re.match(r'^\s*([0-9.]+)\s*([kmgtpezy]b?)?\s*$', string, re.I)\
.groups()
val = float(val)
if unit:
val *= 1 << (10 * (1 + 'kmgtpezy'.index(unit.lower()[0])))
return int(val)
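# e.g. parse_filesize("512") -> 512, parse_filesize("1k") -> 1024 and
# parse_filesize("1.5 MB") -> 1572864; units are binary multiples and
# case-insensitive.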
def parse_datetime(string):
class TZInfo(tzinfo_base):
def __init__(self, offset=None):
self.offset = offset
def dst(self, dt):
raise NotImplementedError()
def tzname(self, dt):
return self.offset
def utcoffset(self, dt):
if self.offset == 'Z':
hours, minutes = 0, 0
else:
hours, minutes = self.offset[1:].split(':')
offset = timedelta(hours=int(hours), minutes=int(minutes))
return offset if self.offset[0] == '+' else -offset
groups = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)T'
r'(\d\d):(\d\d):(\d\d)((?:[+-]\d\d:\d\d)|Z)$', string)\
.groups()
tzinfo = TZInfo(groups[-1])
return datetime(*(int(x) for x in groups[:-1]), tzinfo=tzinfo)
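# e.g. parse_datetime("2019-01-02T03:04:05+01:00") yields
# datetime(2019, 1, 2, 3, 4, 5) carrying a fixed +01:00 offset; a trailing "Z"
# is treated as UTC.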
def parse_timedelta(delta):
match = re.match(r'^\s*(?:(\d+)\s*h)?'
r'\s*(?:(\d+)\s*m)?'
r'\s*(?:(\d+)\s*s?)?\s*$', delta)
h, m, s = match.groups() if match else (None, None, None) # pylint: disable=invalid-name
return (int(h or 0) * 3600 + int(m or 0) * 60 + int(s or 0)) * 1000
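# The result is in milliseconds, e.g. parse_timedelta("1h 30m") -> 5400000 and
# parse_timedelta("90s") -> 90000.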
def profile(func, sort='cumtime'):
# pylint: disable=import-outside-toplevel
import functools
import pstats
from cProfile import Profile
_profile = Profile()
@functools.wraps(func)
def profiled(*args, **kw):
_profile.enable()
result = func(*args, **kw)
_profile.disable()
stats = pstats.Stats(_profile).sort_stats(sort)
stats.print_stats()
return result
return profiled
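# Usage sketch: either decorate a function with @profile or wrap it explicitly,
# e.g. profiled = profile(func, sort='tottime'); each call then prints a pstats
# report sorted by the chosen column after the wrapped call returns.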
def underscore_to_camelCase(string): # pylint: disable=invalid-name
return ''.join(x.capitalize() for x in string.split('_'))
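# e.g. underscore_to_camelCase("dataset_meta") -> 'DatasetMeta'; despite the
# name, the result is UpperCamelCase because every fragment is capitalized.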
def within_pyinstaller_bundle():
return any(x for x in sys.path if '/tmp/_MEI' in x)
def within_staticx_bundle():
return bool(os.environ.get('STATICX_PROG_PATH'))
| agpl-3.0 | -6,854,258,184,394,770,000 | 26.801653 | 93 | 0.58591 | false |
ericholscher/pip-1 | tests/test_upgrade.py | 1 | 8862 | import textwrap
from os.path import join
from nose.tools import nottest
from tests.test_pip import (here, reset_env, run_pip, assert_all_changes,
write_file, pyversion, _create_test_package,
_change_test_package_version)
from tests.local_repos import local_checkout
def test_no_upgrade_unless_requested():
"""
No upgrade if not specifically requested.
"""
reset_env()
run_pip('install', 'INITools==0.1', expect_error=True)
result = run_pip('install', 'INITools', expect_error=True)
assert not result.files_created, 'pip install INITools upgraded when it should not have'
def test_upgrade_to_specific_version():
"""
    It does upgrade to the specific version requested.
"""
env = reset_env()
run_pip('install', 'INITools==0.1', expect_error=True)
result = run_pip('install', 'INITools==0.2', expect_error=True)
assert result.files_created, 'pip install with specific version did not upgrade'
assert env.site_packages/'INITools-0.1-py%s.egg-info' % pyversion in result.files_deleted
assert env.site_packages/'INITools-0.2-py%s.egg-info' % pyversion in result.files_created
def test_upgrade_if_requested():
"""
And it does upgrade if requested.
"""
env = reset_env()
run_pip('install', 'INITools==0.1', expect_error=True)
result = run_pip('install', '--upgrade', 'INITools', expect_error=True)
assert result.files_created, 'pip install --upgrade did not upgrade'
assert env.site_packages/'INITools-0.1-py%s.egg-info' % pyversion not in result.files_created
def test_upgrade_with_newest_already_installed():
"""
If the newest version of a package is already installed, the package should
not be reinstalled and the user should be informed.
"""
find_links = 'file://' + join(here, 'packages')
env = reset_env()
run_pip('install', '-f', find_links, '--no-index', 'simple')
result = run_pip('install', '--upgrade', '-f', find_links, '--no-index', 'simple')
assert not result.files_created, 'simple upgraded when it should not have'
assert 'already up-to-date' in result.stdout, result.stdout
def test_upgrade_force_reinstall_newest():
"""
Force reinstallation of a package even if it is already at its newest
version if --force-reinstall is supplied.
"""
env = reset_env()
result = run_pip('install', 'INITools')
assert env.site_packages/ 'initools' in result.files_created, sorted(result.files_created.keys())
result2 = run_pip('install', '--upgrade', '--force-reinstall', 'INITools')
assert result2.files_updated, 'upgrade to INITools 0.3 failed'
result3 = run_pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [env.venv/'build', 'cache'])
def test_uninstall_before_upgrade():
"""
Automatic uninstall-before-upgrade.
"""
env = reset_env()
result = run_pip('install', 'INITools==0.2', expect_error=True)
assert env.site_packages/ 'initools' in result.files_created, sorted(result.files_created.keys())
result2 = run_pip('install', 'INITools==0.3', expect_error=True)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = run_pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [env.venv/'build', 'cache'])
def test_uninstall_before_upgrade_from_url():
"""
Automatic uninstall-before-upgrade from URL.
"""
env = reset_env()
result = run_pip('install', 'INITools==0.2', expect_error=True)
assert env.site_packages/ 'initools' in result.files_created, sorted(result.files_created.keys())
result2 = run_pip('install', 'http://pypi.python.org/packages/source/I/INITools/INITools-0.3.tar.gz', expect_error=True)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = run_pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [env.venv/'build', 'cache'])
def test_upgrade_to_same_version_from_url():
"""
When installing from a URL the same version that is already installed, no
need to uninstall and reinstall if --upgrade is not specified.
"""
env = reset_env()
result = run_pip('install', 'INITools==0.3', expect_error=True)
assert env.site_packages/ 'initools' in result.files_created, sorted(result.files_created.keys())
result2 = run_pip('install', 'http://pypi.python.org/packages/source/I/INITools/INITools-0.3.tar.gz', expect_error=True)
assert not result2.files_updated, 'INITools 0.3 reinstalled same version'
result3 = run_pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [env.venv/'build', 'cache'])
def test_upgrade_from_reqs_file():
"""
Upgrade from a requirements file.
"""
env = reset_env()
write_file('test-req.txt', textwrap.dedent("""\
PyLogo<0.4
# and something else to test out:
INITools==0.3
"""))
install_result = run_pip('install', '-r', env.scratch_path/ 'test-req.txt')
write_file('test-req.txt', textwrap.dedent("""\
PyLogo
# and something else to test out:
INITools
"""))
run_pip('install', '--upgrade', '-r', env.scratch_path/ 'test-req.txt')
uninstall_result = run_pip('uninstall', '-r', env.scratch_path/ 'test-req.txt', '-y')
assert_all_changes(install_result, uninstall_result, [env.venv/'build', 'cache', env.scratch/'test-req.txt'])
def test_uninstall_rollback():
"""
Test uninstall-rollback (using test package with a setup.py
crafted to fail on install).
"""
env = reset_env()
find_links = 'file://' + join(here, 'packages')
result = run_pip('install', '-f', find_links, '--no-index', 'broken==0.1')
assert env.site_packages / 'broken.py' in result.files_created, list(result.files_created.keys())
result2 = run_pip('install', '-f', find_links, '--no-index', 'broken==0.2broken', expect_error=True)
assert result2.returncode == 1, str(result2)
assert env.run('python', '-c', "import broken; print(broken.VERSION)").stdout == '0.1\n'
assert_all_changes(result.files_after, result2, [env.venv/'build', 'pip-log.txt'])
# Issue #530 - temporarily disable flaky test
@nottest
def test_editable_git_upgrade():
"""
Test installing an editable git package from a repository, upgrading the repository,
installing again, and check it gets the newer version
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
run_pip('install', '-e', '%s#egg=version_pkg' % ('git+file://' + version_pkg_path))
version = env.run('version_pkg')
assert '0.1' in version.stdout
_change_test_package_version(env, version_pkg_path)
run_pip('install', '-e', '%s#egg=version_pkg' % ('git+file://' + version_pkg_path))
version2 = env.run('version_pkg')
assert 'some different version' in version2.stdout, "Output: %s" % (version2.stdout)
def test_should_not_install_always_from_cache():
"""
If there is an old cached package, pip should download the newer version
Related to issue #175
"""
env = reset_env()
run_pip('install', 'INITools==0.2', expect_error=True)
run_pip('uninstall', '-y', 'INITools')
result = run_pip('install', 'INITools==0.1', expect_error=True)
assert env.site_packages/'INITools-0.2-py%s.egg-info' % pyversion not in result.files_created
assert env.site_packages/'INITools-0.1-py%s.egg-info' % pyversion in result.files_created
def test_install_with_ignoreinstalled_requested():
"""
    It installs the package if --ignore-installed (-I) is set.
"""
env = reset_env()
run_pip('install', 'INITools==0.1', expect_error=True)
result = run_pip('install', '-I', 'INITools', expect_error=True)
assert result.files_created, 'pip install -I did not install'
assert env.site_packages/'INITools-0.1-py%s.egg-info' % pyversion not in result.files_created
def test_upgrade_vcs_req_with_no_dists_found():
"""It can upgrade a VCS requirement that has no distributions otherwise."""
reset_env()
req = "%s#egg=pip-test-package" % local_checkout(
"git+http://github.com/pypa/pip-test-package.git")
run_pip("install", req)
result = run_pip("install", "-U", req)
assert not result.returncode
def test_upgrade_vcs_req_with_dist_found():
"""It can upgrade a VCS requirement that has distributions on the index."""
reset_env()
# TODO(pnasrat) Using local_checkout fails on windows - oddness with the test path urls/git.
req = "%s#egg=virtualenv" % "git+git://github.com/pypa/virtualenv@c21fef2c2d53cf19f49bcc37f9c058a33fb50499"
run_pip("install", req)
result = run_pip("install", "-U", req)
assert not "pypi.python.org" in result.stdout, result.stdout
| mit | -2,841,861,391,469,680,000 | 40.218605 | 124 | 0.664748 | false |
jmoiron/jmoiron.net | jmoiron/stream/models.py | 1 | 1223 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""stream data access models"""
from flask import Blueprint, render_template
from micromongo import *
__all__ = ["Entry", "Plugin", "blueprint", "url_prefix"]
blueprint = Blueprint("stream", __name__,
template_folder="templates",
static_folder="static",
)
url_prefix="/stream"
class Entry(Model):
collection = "jmoiron.stream_entry"
spec = {
"source_tag" : Field(required=True, type=basestring),
"title" : Field(required=True, type=basestring),
"permalink": Field(type=basestring),
"data": Field(required=True),
"id": Field(type=int), # legacy
}
def pre_save(self):
self.id = self.find().count() + 1
self.rendered = self._render()
def _render(self):
template = "stream/plugins/%s.html" % (self.source_tag)
return render_template(template, entry=self)
class Plugin(Model):
collection = "jmoiron.stream_plugin"
spec = {
"tag" : Field(required=True, type=basestring),
"name" : Field(required=True, type=basestring),
"id" : Field(type=int), # legacy
"interval" : Field(type=int),
"arguments": Field(required=True)
}
| mit | -1,370,310,344,401,744,100 | 25.021277 | 63 | 0.606705 | false |
zhm/s3_cmd_bin | resources/run-tests.py | 1 | 21749 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
## Amazon S3cmd - testsuite
## Author: Michal Ludvig <[email protected]>
## http://www.logix.cz/michal
## License: GPL Version 2
import sys
import os
import re
from subprocess import Popen, PIPE, STDOUT
import locale
import pwd
count_pass = 0
count_fail = 0
count_skip = 0
test_counter = 0
run_tests = []
exclude_tests = []
verbose = False
if os.name == "posix":
have_wget = True
elif os.name == "nt":
have_wget = False
else:
print "Unknown platform: %s" % os.name
sys.exit(1)
## Unpack testsuite/ directory
if not os.path.isdir('testsuite') and os.path.isfile('testsuite.tar.gz'):
os.system("tar -xz -f testsuite.tar.gz")
if not os.path.isdir('testsuite'):
print "Something went wrong while unpacking testsuite.tar.gz"
sys.exit(1)
os.system("tar -xf testsuite/checksum.tar -C testsuite")
if not os.path.isfile('testsuite/checksum/cksum33.txt'):
print "Something went wrong while unpacking testsuite/checkum.tar"
sys.exit(1)
## Fix up permissions for permission-denied tests
os.chmod("testsuite/permission-tests/permission-denied-dir", 0444)
os.chmod("testsuite/permission-tests/permission-denied.txt", 0000)
## Patterns for Unicode tests
patterns = {}
patterns['UTF-8'] = u"ŪņЇЌœđЗ/☺ unicode € rocks ™"
patterns['GBK'] = u"12月31日/1-特色條目"
encoding = locale.getpreferredencoding()
if not encoding:
print "Guessing current system encoding failed. Consider setting $LANG variable."
sys.exit(1)
else:
print "System encoding: " + encoding
have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
if not have_encoding and os.path.isfile('testsuite/encodings/%s.tar.gz' % encoding):
os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding)
have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
if have_encoding:
#enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding)
enc_pattern = patterns[encoding]
else:
print encoding + " specific files not found."
if not os.path.isdir('testsuite/crappy-file-name'):
os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz")
# TODO: also unpack if the tarball is newer than the directory timestamp
# for instance when a new version was pulled from SVN.
def test(label, cmd_args = [], retcode = 0, must_find = [], must_not_find = [], must_find_re = [], must_not_find_re = []):
def command_output():
print "----"
print " ".join([arg.find(" ")>=0 and "'%s'" % arg or arg for arg in cmd_args])
print "----"
print stdout
print "----"
def failure(message = ""):
global count_fail
if message:
message = " (%r)" % message
print "\x1b[31;1mFAIL%s\x1b[0m" % (message)
count_fail += 1
command_output()
#return 1
sys.exit(1)
def success(message = ""):
global count_pass
if message:
message = " (%r)" % message
print "\x1b[32;1mOK\x1b[0m%s" % (message)
count_pass += 1
if verbose:
command_output()
return 0
def skip(message = ""):
global count_skip
if message:
message = " (%r)" % message
print "\x1b[33;1mSKIP\x1b[0m%s" % (message)
count_skip += 1
return 0
def compile_list(_list, regexps = False):
if regexps == False:
_list = [re.escape(item.encode(encoding, "replace")) for item in _list]
return [re.compile(item, re.MULTILINE) for item in _list]
global test_counter
test_counter += 1
print ("%3d %s " % (test_counter, label)).ljust(30, "."),
sys.stdout.flush()
if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0:
return skip()
if not cmd_args:
return skip()
p = Popen(cmd_args, stdout = PIPE, stderr = STDOUT, universal_newlines = True)
stdout, stderr = p.communicate()
if retcode != p.returncode:
return failure("retcode: %d, expected: %d" % (p.returncode, retcode))
if type(must_find) not in [ list, tuple ]: must_find = [must_find]
if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re]
if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find]
if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re]
find_list = []
find_list.extend(compile_list(must_find))
find_list.extend(compile_list(must_find_re, regexps = True))
find_list_patterns = []
find_list_patterns.extend(must_find)
find_list_patterns.extend(must_find_re)
not_find_list = []
not_find_list.extend(compile_list(must_not_find))
not_find_list.extend(compile_list(must_not_find_re, regexps = True))
not_find_list_patterns = []
not_find_list_patterns.extend(must_not_find)
not_find_list_patterns.extend(must_not_find_re)
for index in range(len(find_list)):
match = find_list[index].search(stdout)
if not match:
return failure("pattern not found: %s" % find_list_patterns[index])
for index in range(len(not_find_list)):
match = not_find_list[index].search(stdout)
if match:
return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0)))
return success()
def test_s3cmd(label, cmd_args = [], **kwargs):
if not cmd_args[0].endswith("s3cmd"):
cmd_args.insert(0, "python")
cmd_args.insert(1, "s3cmd")
return test(label, cmd_args, **kwargs)
def test_mkdir(label, dir_name):
if os.name in ("posix", "nt"):
cmd = ['mkdir', '-p']
else:
print "Unknown platform: %s" % os.name
sys.exit(1)
cmd.append(dir_name)
return test(label, cmd)
def test_rmdir(label, dir_name):
if os.path.isdir(dir_name):
if os.name == "posix":
cmd = ['rm', '-rf']
elif os.name == "nt":
cmd = ['rmdir', '/s/q']
else:
print "Unknown platform: %s" % os.name
sys.exit(1)
cmd.append(dir_name)
return test(label, cmd)
else:
return test(label, [])
def test_flushdir(label, dir_name):
test_rmdir(label + "(rm)", dir_name)
return test_mkdir(label + "(mk)", dir_name)
def test_copy(label, src_file, dst_file):
if os.name == "posix":
cmd = ['cp', '-f']
elif os.name == "nt":
cmd = ['copy']
else:
print "Unknown platform: %s" % os.name
sys.exit(1)
cmd.append(src_file)
cmd.append(dst_file)
return test(label, cmd)
try:
pwd = pwd.getpwuid(os.getuid())
bucket_prefix = "%s.%s-" % (pwd.pw_name, pwd.pw_uid)
except:
bucket_prefix = ''
print "Using bucket prefix: '%s'" % bucket_prefix
argv = sys.argv[1:]
while argv:
arg = argv.pop(0)
if arg.startswith('--bucket-prefix='):
print "Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'"
sys.exit(0)
if arg in ("-h", "--help"):
print "%s A B K..O -N" % sys.argv[0]
print "Run tests number A, B and K through to O, except for N"
sys.exit(0)
if arg in ("-l", "--list"):
exclude_tests = range(0, 999)
break
if arg in ("-v", "--verbose"):
verbose = True
continue
if arg in ("-p", "--bucket-prefix"):
try:
bucket_prefix = argv.pop(0)
except IndexError:
print "Bucket prefix option must explicitly supply a bucket name prefix"
sys.exit(0)
continue
if arg.find("..") >= 0:
range_idx = arg.find("..")
range_start = arg[:range_idx] or 0
range_end = arg[range_idx+2:] or 999
run_tests.extend(range(int(range_start), int(range_end) + 1))
elif arg.startswith("-"):
exclude_tests.append(int(arg[1:]))
else:
run_tests.append(int(arg))
if not run_tests:
run_tests = range(0, 999)
# helper functions for generating bucket names
def bucket(tail):
'''Test bucket name'''
label = 'autotest'
if str(tail) == '3':
label = 'Autotest'
return '%ss3cmd-%s-%s' % (bucket_prefix, label, tail)
def pbucket(tail):
'''Like bucket(), but prepends "s3://" for you'''
return 's3://' + bucket(tail)
## ====== Remove test buckets
test_s3cmd("Remove test buckets", ['rb', '-r', pbucket(1), pbucket(2), pbucket(3)],
must_find = [ "Bucket '%s/' removed" % pbucket(1),
"Bucket '%s/' removed" % pbucket(2),
"Bucket '%s/' removed" % pbucket(3) ])
## ====== Create one bucket (EU)
test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)],
must_find = "Bucket '%s/' created" % pbucket(1))
## ====== Create multiple buckets
test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)],
must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)])
## ====== Invalid bucket name
test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')],
retcode = 1,
must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'),
must_not_find_re = "Bucket.*created")
## ====== Buckets list
test_s3cmd("Buckets list", ["ls"],
must_find = [ "autotest-1", "autotest-2", "Autotest-3" ], must_not_find_re = "autotest-EU")
## ====== Sync to S3
test_s3cmd("Sync to S3", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings' ],
must_find = [ "WARNING: 32 non-printable characters replaced in: crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\]^<>%%\"'#{}`&?.end",
"WARNING: File can not be uploaded: testsuite/permission-tests/permission-denied.txt: Permission denied",
"stored as '%s/xyz/crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\\]^<>%%%%\"'#{}`&?.end'" % pbucket(1) ],
must_not_find_re = [ "demo/", "\.png$", "permission-denied-dir" ])
if have_encoding:
## ====== Sync UTF-8 / GBK / ... to S3
test_s3cmd("Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ],
must_find = [ u"File 'testsuite/encodings/%(encoding)s/%(pattern)s' stored as '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ])
## ====== List bucket content
test_s3cmd("List bucket content", ['ls', '%s/xyz/' % pbucket(1) ],
must_find_re = [ u"DIR %s/xyz/binary/$" % pbucket(1) , u"DIR %s/xyz/etc/$" % pbucket(1) ],
must_not_find = [ u"random-crap.md5", u"/demo" ])
## ====== List bucket recursive
must_find = [ u"%s/xyz/binary/random-crap.md5" % pbucket(1) ]
if have_encoding:
must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
test_s3cmd("List bucket recursive", ['ls', '--recursive', pbucket(1)],
must_find = must_find,
must_not_find = [ "logo.png" ])
## ====== FIXME
# test_s3cmd("Recursive put", ['put', '--recursive', 'testsuite/etc', '%s/xyz/' % pbucket(1) ])
## ====== Clean up local destination dir
test_flushdir("Clean testsuite-out/", "testsuite-out")
## ====== Sync from S3
must_find = [ "File '%s/xyz/binary/random-crap.md5' stored as 'testsuite-out/xyz/binary/random-crap.md5'" % pbucket(1) ]
if have_encoding:
must_find.append(u"File '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' stored as 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
test_s3cmd("Sync from S3", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
must_find = must_find)
## ====== Remove 'demo' directory
test_rmdir("Remove 'dir-test/'", "testsuite-out/xyz/dir-test/")
## ====== Create dir with name of a file
test_mkdir("Create file-dir dir", "testsuite-out/xyz/dir-test/file-dir")
## ====== Skip dst dirs
test_s3cmd("Skip over dir", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
must_find = "WARNING: testsuite-out/xyz/dir-test/file-dir is a directory - skipping over")
## ====== Clean up local destination dir
test_flushdir("Clean testsuite-out/", "testsuite-out")
## ====== Put public, guess MIME
test_s3cmd("Put public, guess MIME", ['put', '--guess-mime-type', '--acl-public', 'testsuite/etc/logo.png', '%s/xyz/etc/logo.png' % pbucket(1)],
must_find = [ "stored as '%s/xyz/etc/logo.png'" % pbucket(1) ])
## ====== Retrieve from URL
if have_wget:
test("Retrieve from URL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
must_find_re = [ 'logo.png.*saved \[22059/22059\]' ])
## ====== Change ACL to Private
test_s3cmd("Change ACL to Private", ['setacl', '--acl-private', '%s/xyz/etc/l*.png' % pbucket(1)],
must_find = [ "logo.png: ACL set to Private" ])
## ====== Verify Private ACL
if have_wget:
test("Verify Private ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
retcode = 8,
must_find_re = [ 'ERROR 403: Forbidden' ])
## ====== Change ACL to Public
test_s3cmd("Change ACL to Public", ['setacl', '--acl-public', '--recursive', '%s/xyz/etc/' % pbucket(1) , '-v'],
must_find = [ "logo.png: ACL set to Public" ])
## ====== Verify Public ACL
if have_wget:
test("Verify Public ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)],
must_find_re = [ 'logo.png.*saved \[22059/22059\]' ])
## ====== Sync more to S3
test_s3cmd("Sync more to S3", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt' ],
must_find = [ "File 'testsuite/demo/some-file.xml' stored as '%s/xyz/demo/some-file.xml' " % pbucket(1) ],
must_not_find = [ "File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png" % pbucket(1) ])
## ====== Don't check MD5 sum on Sync
test_copy("Change file cksum1.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum1.txt")
test_copy("Change file cksum33.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum33.txt")
test_s3cmd("Don't check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--no-check-md5'],
must_find = [ "cksum33.txt" ],
must_not_find = [ "cksum1.txt" ])
## ====== Check MD5 sum on Sync
test_s3cmd("Check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--check-md5'],
must_find = [ "cksum1.txt" ])
## ====== Rename within S3
test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
must_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1))])
## ====== Rename (NoSuchKey)
test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
retcode = 1,
must_find_re = [ 'ERROR:.*NoSuchKey' ],
must_not_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1)) ])
## ====== Sync more from S3
test_s3cmd("Sync more from S3", ['sync', '--delete-removed', '%s/xyz' % pbucket(1), 'testsuite-out'],
must_find = [ "deleted: testsuite-out/logo.png",
"File '%s/xyz/etc2/Logo.PNG' stored as 'testsuite-out/xyz/etc2/Logo.PNG' (22059 bytes" % pbucket(1),
"File '%s/xyz/demo/some-file.xml' stored as 'testsuite-out/xyz/demo/some-file.xml' " % pbucket(1) ],
must_not_find_re = [ "not-deleted.*etc/logo.png" ])
## ====== Make dst dir for get
test_rmdir("Remove dst dir for get", "testsuite-out")
## ====== Get multiple files
test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
retcode = 1,
must_find = [ 'Destination must be a directory or stdout when downloading multiple sources.' ])
## ====== Make dst dir for get
test_mkdir("Make dst dir for get", "testsuite-out")
## ====== Get multiple files
test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
must_find = [ u"saved as 'testsuite-out/Logo.PNG'", u"saved as 'testsuite-out/AtomicClockRadio.ttf'" ])
## ====== Upload files differing in capitalisation
test_s3cmd("blah.txt / Blah.txt", ['put', '-r', 'testsuite/blahBlah', pbucket(1)],
must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)])
## ====== Copy between buckets
test_s3cmd("Copy between buckets", ['cp', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc2/logo.png' % pbucket(3)],
must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/xyz/etc2/logo.png" % (pbucket(1), pbucket(3)) ])
## ====== Recursive copy
test_s3cmd("Recursive copy, set ACL", ['cp', '-r', '--acl-public', '%s/xyz/' % pbucket(1), '%s/copy' % pbucket(2), '--exclude', 'demo/dir?/*.txt', '--exclude', 'non-printables*'],
must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
"File %s/xyz/blahBlah/Blah.txt copied to %s/copy/blahBlah/Blah.txt" % (pbucket(1), pbucket(2)),
"File %s/xyz/blahBlah/blah.txt copied to %s/copy/blahBlah/blah.txt" % (pbucket(1), pbucket(2)) ],
must_not_find = [ "demo/dir1/file1-1.txt" ])
## ====== Verify ACL and MIME type
test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
must_find_re = [ "MIME type:.*image/png",
"ACL:.*\*anon\*: READ",
"URL:.*http://%s.s3.amazonaws.com/copy/etc2/Logo.PNG" % bucket(2) ])
## ====== Rename within S3
test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/copy/etc/logo.png' % pbucket(2)],
must_find = [ 'File %s/copy/etc2/Logo.PNG moved to %s/copy/etc/logo.png' % (pbucket(2), pbucket(2))])
## ====== Sync between buckets
test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'],
must_find = [ "File %s/xyz/demo/dir1/file1-1.txt copied to %s/copy/demo/dir1/file1-1.txt" % (pbucket(1), pbucket(2)),
"File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)),
"deleted: '%s/copy/etc/logo.png'" % pbucket(2) ],
must_not_find = [ "blah.txt" ])
## ====== Don't Put symbolic link
test_s3cmd("Don't put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),],
must_not_find_re = [ "linked1.png"])
## ====== Put symbolic link
test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),'--follow-symlinks' ],
must_find = [ "File 'testsuite/etc/linked1.png' stored as '%s/xyz/linked1.png'" % pbucket(1)])
## ====== Sync symbolic links
test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks' ],
must_find = ["File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png'" % pbucket(1)],
# Don't want to recursively copy linked directories!
must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt",
"etc/brokenlink.png"],
)
## ====== Multi source move
test_s3cmd("Multi-source move", ['mv', '-r', '%s/copy/blahBlah/Blah.txt' % pbucket(2), '%s/copy/etc/' % pbucket(2), '%s/moved/' % pbucket(2)],
must_find = [ "File %s/copy/blahBlah/Blah.txt moved to %s/moved/Blah.txt" % (pbucket(2), pbucket(2)),
"File %s/copy/etc/AtomicClockRadio.ttf moved to %s/moved/AtomicClockRadio.ttf" % (pbucket(2), pbucket(2)),
"File %s/copy/etc/TypeRa.ttf moved to %s/moved/TypeRa.ttf" % (pbucket(2), pbucket(2)) ],
must_not_find = [ "blah.txt" ])
## ====== Verify move
test_s3cmd("Verify move", ['ls', '-r', pbucket(2)],
must_find = [ "%s/moved/Blah.txt" % pbucket(2),
"%s/moved/AtomicClockRadio.ttf" % pbucket(2),
"%s/moved/TypeRa.ttf" % pbucket(2),
"%s/copy/blahBlah/blah.txt" % pbucket(2) ],
must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2),
"%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2),
"%s/copy/etc/TypeRa.ttf" % pbucket(2) ])
## ====== Simple delete
test_s3cmd("Simple delete", ['del', '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
must_find = [ "File %s/xyz/etc2/Logo.PNG deleted" % pbucket(1) ])
## ====== Recursive delete
test_s3cmd("Recursive delete", ['del', '--recursive', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
must_find = [ "File %s/xyz/etc/TypeRa.ttf deleted" % pbucket(1) ],
must_find_re = [ "File .*/etc/logo.png deleted" ],
must_not_find = [ "AtomicClockRadio.ttf" ])
## ====== Recursive delete all
test_s3cmd("Recursive delete all", ['del', '--recursive', '--force', pbucket(1)],
must_find_re = [ "File .*binary/random-crap deleted" ])
## ====== Remove empty bucket
test_s3cmd("Remove empty bucket", ['rb', pbucket(1)],
must_find = [ "Bucket '%s/' removed" % pbucket(1) ])
## ====== Remove remaining buckets
test_s3cmd("Remove remaining buckets", ['rb', '--recursive', pbucket(2), pbucket(3)],
must_find = [ "Bucket '%s/' removed" % pbucket(2),
"Bucket '%s/' removed" % pbucket(3) ])
# vim:et:ts=4:sts=4:ai
| mit | -1,690,160,694,387,922,000 | 39.454376 | 225 | 0.595885 | false |
dataplumber/nexus | climatology/clim/pixelStats.py | 1 | 8206 | """
pixelStats.py
Compute a multi-epoch (multi-day) statistics for each lat/lon pixel read from daily Level-3 grids.
Also do statistics roll-ups from daily to monthly, monthly to seasonal, seasonal to yearly,
yearly to multi-year, and multi-year to total N-year period.
Simple code to be run using Spark or Dpark.
"""
import sys, os, urllib, re, time
import numpy as N
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as M
from netCDF4 import Dataset, default_fillvals
from variables import getVariables, close
from split import splitByMonth
from cache import retrieveFile, CachePath
#from pyspark import SparkContext # both imported below when needed
#import dpark
Modes = ['sequential', 'dpark', 'spark']
Accumulators = ['count', 'sum', 'sumsq', 'min', 'max']
Stats = ['count', 'mean', 'stddev', 'min', 'max']
GroupByKeys = ['month', 'season', 'year', '3-year', 'total']
TimeFromFilenameDOY = {'get': ('year', 'doy'), 'regex': re.compile(r'\/A(....)(...)')}
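# TimeFromFilenameDOY extracts the (year, day-of-year) key from each granule URL:
# e.g. an illustrative path containing '/A2015032' would yield year '2015' and DOY '032'.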
def pixelStats(urls, variable, nPartitions, timeFromFilename=TimeFromFilenameDOY, groupByKeys=GroupByKeys, accumulators=Accumulators,
cachePath=CachePath, mode='dpark', modes=Modes):
'''Compute a global (or regional) pixel mean field in parallel, given a list of URL's pointing to netCDF files.'''
baseKey = groupByKeys[0]
if baseKey == 'month':
urlsByKey = splitByMonth(urls, timeFromFilename)
else:
print >>sys.stderr, 'pixelStats: Unrecognized groupByKey "%s". Must be in %s' % (baseKey, str(groupByKeys))
sys.exit(1)
if mode == 'sequential':
accum = [accumulate(u, variable, accumulators) for u in urlsByKey]
merged = reduce(combine, accum)
stats = statsFromAccumulators(merged)
elif mode == 'dpark':
import dpark
urls = dpark.parallelize(urlsByKey, nPartitions) # returns RDD of URL lists
accum = urls.map(lambda urls: accumulate(urls, variable, accumulators)) # returns RDD of stats accumulators
merged = accum.reduce(combine) # merged accumulators on head node
stats = statsFromAccumulators(merged) # compute final stats from accumulators
elif mode == 'spark':
from pyspark import SparkContext
sc = SparkContext(appName="PixelStats")
urls = sc.parallelize(urlsByKey, nPartitions) # returns RDD of URL lists
accum = urls.map(lambda urls: accumulate(urls, variable, accumulators)) # returns RDD of stats accumulators
merged = accum.reduce(combine) # merged accumulators on head node
stats = statsFromAccumulators(merged) # compute final stats from accumulators
else:
stats = None
if mode not in modes:
print >>sys.stderr, 'pixelStats: Unrecognized mode "%s". Must be in %s' % (mode, str(modes))
sys.exit(1)
return stats
def accumulate(urls, variable, accumulators, cachePath=CachePath):
'''Accumulate data into statistics accumulators like count, sum, sumsq, min, max, M3, M4, etc.'''
keys, urls = urls
accum = {}
for i, url in enumerate(urls):
try:
path = retrieveFile(url, cachePath)
fn = os.path.split(path)[1]
except:
print >>sys.stderr, 'accumulate: Error, continuing without file %s' % url
continue
try:
var, fh = getVariables(path, [variable], arrayOnly=True, set_auto_mask=True) # return dict of variable objects by name
v = var[variable] # masked array
close(fh)
except:
print >>sys.stderr, 'accumulate: Error, cannot read variable %s from file %s' % (variable, path)
continue
if i == 0:
for k in accumulators:
if k == 'min': accum[k] = default_fillvals['f8'] * N.ones(v.shape, dtype=N.float64)
elif k == 'max': accum[k] = -default_fillvals['f8'] * N.ones(v.shape, dtype=N.float64)
elif k == 'count': accum[k] = N.zeros(v.shape, dtype=N.int64)
else:
accum[k] = N.zeros(v.shape, dtype=N.float64)
if 'count' in accumulators:
accum['count'] += ~v.mask
if 'min' in accumulators:
accum['min'] = N.ma.minimum(accum['min'], v)
if 'max' in accumulators:
accum['max'] = N.ma.maximum(accum['max'], v)
v = N.ma.filled(v, 0.)
if 'sum' in accumulators:
accum['sum'] += v
if 'sumsq' in accumulators:
accum['sumsq'] += v*v
return (keys, accum)
def combine(a, b):
'''Combine accumulators by summing.'''
keys, a = a
b = b[1]
for k in a.keys():
if k != 'min' and k != 'max':
a[k] += b[k]
    if 'min' in a:
        a['min'] = N.ma.minimum(a['min'], b['min'])
    if 'max' in a:
        a['max'] = N.ma.maximum(a['max'], b['max'])
return (('total',), a)
def statsFromAccumulators(accum):
'''Compute final statistics from accumulators.'''
keys, accum = accum
# Mask all of the accumulator arrays
accum['count'] = N.ma.masked_equal(accum['count'], 0, copy=False)
mask = accum['count'].mask
for k in accum:
if k != 'count':
accum[k] = N.ma.array(accum[k], copy=False, mask=mask)
# Compute stats (masked)
stats = {}
if 'count' in accum:
stats['count'] = accum['count']
if 'min' in accum:
stats['min'] = accum['min']
if 'max' in accum:
stats['max'] = accum['max']
if 'sum' in accum:
stats['mean'] = accum['sum'] / accum['count']
    if 'sumsq' in accum and 'sum' in accum:
        # stddev from the accumulators: sqrt((sum(x^2) - n*mean^2) / (n - 1))
        mean = accum['sum'] / accum['count']
        stats['stddev'] = N.sqrt((accum['sumsq'] - accum['count'] * mean * mean) / (accum['count'].astype(N.float32) - 1))
return (keys, stats)
def writeStats(urls, variable, stats, outFile, copyToHdfsPath=None, format='NETCDF4', cachePath=CachePath):
'''Write out stats arrays to netCDF with some attributes.
'''
keys, stats = stats
dout = Dataset(outFile, 'w', format=format)
print >>sys.stderr, 'Writing %s ...' % outFile
dout.setncattr('variable', variable)
dout.setncattr('urls', str(urls))
dout.setncattr('level', str(keys))
inFile = retrieveFile(urls[0], cachePath)
din = Dataset(inFile, 'r')
try:
coordinates = din.variables[variable].getncattr('coordinates')
coordinates = coordinates.split()
except:
coordinates = ('lat', 'lon') # kludge: FIX ME
# Add dimensions and variables, copying data
coordDim = [dout.createDimension(coord, din.variables[coord].shape[0]) for coord in coordinates] # here lat, lon, alt, etc.
for coord in coordinates:
var = dout.createVariable(coord, din.variables[coord].dtype, (coord,))
var[:] = din.variables[coord][:]
# Add stats variables
for k,v in stats.items():
var = dout.createVariable(k, stats[k].dtype, coordinates)
var[:] = v[:]
din.close()
dout.close()
return outFile
def totalStats(args):
urlFile = args[0]
with open(urlFile, 'r') as f:
urls = [line.strip() for line in f]
variable = args[1]
mode = args[2]
nPartitions = int(args[3])
outFile = args[4]
stats = pixelStats(urls, variable, nPartitions, mode=mode)
outFile = writeStats(urls, variable, stats, outFile)
return outFile
def main(args):
return totalStats(args)
if __name__ == '__main__':
print main(sys.argv[1:])
# python pixelStats.py urls_sst_daynight_2003_3days.txt sst sequential 1 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_4months.txt sst sequential 1 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_4months.txt sst dpark 4 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_4months.txt sst spark 4 modis_sst_stats_test.nc
# python pixelStats.py urls_sst_daynight_2003_2015.txt sst dpark 16 modis_sst_stats.nc
# python pixelStats.py urls_sst_daynight_2003_2015.txt sst spark 16 modis_sst_stats.nc
| apache-2.0 | -6,013,710,985,132,484,000 | 36.815668 | 133 | 0.610041 | false |
wchen1994/Alignments-of-Sequences | alignSeq/app4.py | 1 | 5765 | import url_file_read as provided
import math
import random
import project4
import matplotlib.pyplot as plt
import progressbar  # used by generate_null_distribution() for its progress bar
protein_human = provided.read_protein(provided.HUMAN_EYELESS_URL)
protein_fly = provided.read_protein(provided.FRUITFLY_EYELESS_URL)
scoring_matrix = provided.read_scoring_matrix(provided.PAM50_URL)
"""
calculate the score of the local alignment of the human and fruit fly eyeless proteins
"""
def align_human_fly_protein():
alignment_matrix = project4.compute_alignment_matrix(protein_human, protein_fly, scoring_matrix, False)
result = project4.compute_local_alignment(protein_human, protein_fly, scoring_matrix, alignment_matrix)
return result
#score, human_protein_aligned, fly_protein_aligned = align_human_fly_protein()
#print score
"""
calculate how similar the aligned human and fly sequences are to the consensus PAX domain protein (fraction of matching positions in a global alignment)
"""
def calculate_similar_ratio():
result = align_human_fly_protein()
sequence_human = result[1].replace('-', '')
sequence_fly = result[2].replace('-', '')
protein_consensus = provided.read_protein(provided.CONSENSUS_PAX_URL)
alignment_matrix = project4.compute_alignment_matrix(sequence_human, protein_consensus, scoring_matrix, True)
result = project4.compute_global_alignment(sequence_human, protein_consensus, scoring_matrix, alignment_matrix)
mark = 0
for idx in range(len(result[1])):
if result[1][idx] == result[2][idx]:
mark += 1
print mark / float(len(result[1]))
protein_consensus = provided.read_protein(provided.CONSENSUS_PAX_URL)
alignment_matrix = project4.compute_alignment_matrix(sequence_fly, protein_consensus, scoring_matrix, True)
result = project4.compute_global_alignment(sequence_fly, protein_consensus, scoring_matrix, alignment_matrix)
mark = 0
for idx in range(len(result[1])):
if result[1][idx] == result[2][idx]:
mark += 1
print mark / float(len(result[1]))
#calculate_similar_ratio()
"""
plot a histogram of local alignment scores against randomly shuffled (null) sequences
"""
def save_dict(dictionary):
dict_file = open("distribution.csv", "w")
for key, value in dictionary.items():
dict_file.write(str(key)+","+str(value)+"\n")
dict_file.close()
def read_dict(fname):
dict_file = open(fname, "r")
dictionary = {}
for line in dict_file:
line = line.strip()
key, value = line.split(",")
dictionary[int(key)] = int(value)
return dictionary
def generate_null_distribution(seq_x, seq_y, scoring_matrix, num_trials):
distribution = {}
    bar = progressbar.ProgressBar(max_value=num_trials)
for progress in range(num_trials):
bar.update(progress)
rand_y = list(seq_y)
random.shuffle(rand_y)
alignment_matrix = project4.compute_alignment_matrix(seq_x, rand_y, scoring_matrix, False)
score = project4.compute_local_alignment(seq_x, rand_y, scoring_matrix, alignment_matrix)[0]
distribution[score] = distribution.get(score,0) + 1
save_dict(distribution)
return distribution
def plot_histogram():
READ = True
if READ:
dist =read_dict("distribution.csv")
else:
dist = generate_null_distribution(protein_human, protein_fly, scoring_matrix, 1000)
x = dist.keys()
y = dist.values()
y_normal = [idx/1000.0 for idx in y]
plt.bar(x, y_normal)
plt.title("Null distribution using 1000 trials")
plt.xlabel("Scores")
plt.ylabel("Fraction of trials")
plt.show()
#plot_histogram()
"""
calculate the mean and standard deviation of the null score distribution over shuffled sequences
"""
def cal_mean_stdv():
score_list = []
dist =read_dict("distribution.csv")
for score, appearance in dist.items():
score_list += [score] * appearance
mean = sum(score_list)/float(len(score_list))
stdv = math.sqrt(sum([(value - mean) ** 2 for value in score_list])/float(len(score_list)))
return mean, stdv
#print cal_mean_stdv()
"""
Spelling Checking
"""
word_list = provided.read_words(provided.WORD_LIST_URL)
def check_spelling(checked_word, dist, word_list):
# scoring matrix for edit distaion
# edit distance = |x| + |y| - score(X,Y)
# diag_socre = 2, off_diag_score = 1, dash_score = 0
alphabets = set("abcdefghijklmnopqrstuvwxyz")
scoring_matrix = project4.build_scoring_matrix(alphabets,2,1,0)
string_set = set([])
for word in word_list:
alignment_matrix = project4.compute_alignment_matrix(checked_word ,word, scoring_matrix, True)
score, _, _ = project4.compute_global_alignment(checked_word, word, scoring_matrix, alignment_matrix)
score = len(checked_word) + len(word) - score
if score <= dist:
string_set.add(word)
return string_set
def fast_check_spelling(checked_word, dist, word_list):
word_set = set(word_list)
align_set = set([])
for word in word_set:
if check_valid(checked_word, word, dist):
align_set.add(word)
return align_set
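# check_valid() is a bounded edit-distance test: it returns True when `word` can be
# turned into `checked_word` with at most `dist` single-character insertions,
# deletions or substitutions, recursing on the first characters of both strings.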
def check_valid(checked_word, word, dist):
if dist < 0:
return False
elif checked_word and word:
if checked_word[0] == word[0]:
return check_valid(checked_word[1:], word[1:], dist)
else:
return check_valid(checked_word, word[1:], dist - 1) or check_valid(checked_word[1:], word, dist - 1) or check_valid(checked_word[1:], word[1:], dist - 1)
elif checked_word:
if dist - len(checked_word) < 0:
return False
else:
return True
elif word:
if dist - len(word) < 0:
return False
else:
return True
else:
return True
#print fast_check_spelling("humble", 1, word_list)
#print fast_check_spelling("firefly", 2, word_list)
| mit | -3,805,425,347,808,624,000 | 33.520958 | 166 | 0.662272 | false |
litebitcoins/litebitcoin | test/functional/test_framework/mininode.py | 1 | 57674 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
import litebitcoin_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
BIP0031_VERSION = 60000
MY_VERSION = 80014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
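# CompactSize ("var_int") encoding: values below 253 take one byte; larger values are
# prefixed with 0xfd/0xfe/0xff and stored in 2, 4 or 8 little-endian bytes respectively.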
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
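# uint256_from_compact() expands the "compact" (nBits) target representation:
# the top byte is a base-256 exponent and the low three bytes are the mantissa.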
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.scrypt256 = header.scrypt256
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.scrypt256 = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.scrypt256 = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
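    # The block id is the double-SHA256 of the 80-byte header, while proof-of-work is
    # checked against the scrypt hash (see is_valid()/solve() in CBlock below), so
    # calc_sha256() tracks both digests.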
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
self.scrypt256 = uint256_from_str(litebitcoin_scrypt.getPoWHash(r))
def rehash(self):
self.sha256 = None
self.scrypt256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.scrypt256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.scrypt256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
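    # Per BIP 152, the two SipHash keys are taken from the first 16 bytes of
    # sha256(serialized block header || little-endian nonce).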
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
    def __init__(self, header_and_shortids=None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
raise
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
    def on_mempool(self, conn, message): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
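# Illustrative sketch (not part of the framework): a minimal subclass of the
# kind an individual test case might define. The names below are hypothetical
# examples; only the inherited NodeConnCB hooks and helpers are real.
class ExampleTestNode(NodeConnCB):
    def __init__(self):
        super(ExampleTestNode, self).__init__()
        self.announced_blocks = []
    def on_inv(self, conn, message):
        # Record announced block hashes (CInv type 2 = block) before falling
        # back to the default behaviour of requesting the objects.
        self.announced_blocks.extend(i.hash for i in message.inv if i.type == 2)
        NodeConnCB.on_inv(self, conn, message)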
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet3": b"\xfc\xc1\xb7\xdc", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to Litebitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
            # asyncore does not expose the connection state directly, only the
            # first read/write event, so we must check the state manually here
            # to know when we have actually connected
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
raise ValueError("Unknown command: '%s'" % (command))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
raise
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
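    # Worked example (illustrative, nonce bytes hypothetical): for a "ping"
    # with an 8-byte nonce on regtest, and ver_send >= 209, the frame built
    # above is laid out as
    #   fa bf b5 da                           magic (4 bytes)
    #   70 69 6e 67 00 00 00 00 00 00 00 00   command "ping", NUL-padded to 12
    #   08 00 00 00                           payload length, little-endian
    #   xx xx xx xx                           first 4 bytes of sha256(sha256(payload))
    #   nn nn nn nn nn nn nn nn               payload (the <Q-packed nonce)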
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
            # We check whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
            # select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
            for obj in disconnected:
                obj.handle_close()
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| mit | 8,927,728,029,096,172,000 | 30.091105 | 262 | 0.578978 | false |
escapewindow/signingscript | src/signingscript/vendored/mozbuild/mozbuild/test/backend/test_build.py | 2 | 9822 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals, print_function
import buildconfig
import os
import shutil
import sys
import unittest
import mozpack.path as mozpath
from contextlib import contextmanager
from mozunit import main
from mozbuild.backend import get_backend_class
from mozbuild.backend.configenvironment import ConfigEnvironment
from mozbuild.backend.recursivemake import RecursiveMakeBackend
from mozbuild.backend.fastermake import FasterMakeBackend
from mozbuild.base import MozbuildObject
from mozbuild.frontend.emitter import TreeMetadataEmitter
from mozbuild.frontend.reader import BuildReader
from mozbuild.util import ensureParentDir
from mozpack.files import FileFinder
from tempfile import mkdtemp
BASE_SUBSTS = [
('PYTHON', mozpath.normsep(sys.executable)),
('MOZ_UI_LOCALE', 'en-US'),
]
class TestBuild(unittest.TestCase):
def setUp(self):
self._old_env = dict(os.environ)
os.environ.pop('MOZCONFIG', None)
os.environ.pop('MOZ_OBJDIR', None)
os.environ.pop('MOZ_PGO', None)
def tearDown(self):
os.environ.clear()
os.environ.update(self._old_env)
@contextmanager
def do_test_backend(self, *backends, **kwargs):
# Create the objdir in the srcdir to ensure that they share
# the same drive on Windows.
topobjdir = mkdtemp(dir=buildconfig.topsrcdir)
try:
config = ConfigEnvironment(buildconfig.topsrcdir, topobjdir,
**kwargs)
reader = BuildReader(config)
emitter = TreeMetadataEmitter(config)
moz_build = mozpath.join(config.topsrcdir, 'test.mozbuild')
definitions = list(emitter.emit(
reader.read_mozbuild(moz_build, config)))
for backend in backends:
backend(config).consume(definitions)
yield config
except Exception:
raise
finally:
if not os.environ.get('MOZ_NO_CLEANUP'):
shutil.rmtree(topobjdir)
@contextmanager
def line_handler(self):
lines = []
def handle_make_line(line):
lines.append(line)
try:
yield handle_make_line
except Exception:
print('\n'.join(lines))
raise
if os.environ.get('MOZ_VERBOSE_MAKE'):
print('\n'.join(lines))
def test_recursive_make(self):
substs = list(BASE_SUBSTS)
with self.do_test_backend(RecursiveMakeBackend,
substs=substs) as config:
build = MozbuildObject(config.topsrcdir, None, None,
config.topobjdir)
overrides = [
'install_manifest_depends=',
'MOZ_JAR_MAKER_FILE_FORMAT=flat',
'TEST_MOZBUILD=1',
]
with self.line_handler() as handle_make_line:
build._run_make(directory=config.topobjdir, target=overrides,
silent=False, line_handler=handle_make_line)
self.validate(config)
def test_faster_recursive_make(self):
substs = list(BASE_SUBSTS) + [
('BUILD_BACKENDS', 'FasterMake+RecursiveMake'),
]
with self.do_test_backend(get_backend_class(
'FasterMake+RecursiveMake'), substs=substs) as config:
buildid = mozpath.join(config.topobjdir, 'config', 'buildid')
ensureParentDir(buildid)
with open(buildid, 'w') as fh:
fh.write('20100101012345\n')
build = MozbuildObject(config.topsrcdir, None, None,
config.topobjdir)
overrides = [
'install_manifest_depends=',
'MOZ_JAR_MAKER_FILE_FORMAT=flat',
'TEST_MOZBUILD=1',
]
with self.line_handler() as handle_make_line:
build._run_make(directory=config.topobjdir, target=overrides,
silent=False, line_handler=handle_make_line)
self.validate(config)
def test_faster_make(self):
substs = list(BASE_SUBSTS) + [
('MOZ_BUILD_APP', 'dummy_app'),
('MOZ_WIDGET_TOOLKIT', 'dummy_widget'),
]
with self.do_test_backend(RecursiveMakeBackend, FasterMakeBackend,
substs=substs) as config:
buildid = mozpath.join(config.topobjdir, 'config', 'buildid')
ensureParentDir(buildid)
with open(buildid, 'w') as fh:
fh.write('20100101012345\n')
build = MozbuildObject(config.topsrcdir, None, None,
config.topobjdir)
overrides = [
'TEST_MOZBUILD=1',
]
with self.line_handler() as handle_make_line:
build._run_make(directory=mozpath.join(config.topobjdir,
'faster'),
target=overrides, silent=False,
line_handler=handle_make_line)
self.validate(config)
def validate(self, config):
self.maxDiff = None
test_path = os.sep.join(('$SRCDIR', 'python', 'mozbuild', 'mozbuild',
'test', 'backend', 'data', 'build')) + os.sep
        # We want unicode instances out of the files, because plain str
        # instances make the assertEqual diff output extra verbose in case of
        # error, due to the difference in type.
result = {
p: f.open().read().decode('utf-8')
for p, f in FileFinder(mozpath.join(config.topobjdir, 'dist'))
}
self.assertTrue(len(result))
self.assertEqual(result, {
'bin/baz.ini': 'baz.ini: FOO is foo\n',
'bin/child/bar.ini': 'bar.ini\n',
'bin/child2/foo.css': 'foo.css: FOO is foo\n',
'bin/child2/qux.ini': 'qux.ini: BAR is not defined\n',
'bin/chrome.manifest':
'manifest chrome/foo.manifest\n'
'manifest components/components.manifest\n',
'bin/chrome/foo.manifest':
'content bar foo/child/\n'
'content foo foo/\n'
'override chrome://foo/bar.svg#hello '
'chrome://bar/bar.svg#hello\n',
'bin/chrome/foo/bar.js': 'bar.js\n',
'bin/chrome/foo/child/baz.jsm':
'//@line 2 "%sbaz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path),
'bin/chrome/foo/child/hoge.js':
'//@line 2 "%sbar.js"\nbar.js: FOO is foo\n' % (test_path),
'bin/chrome/foo/foo.css': 'foo.css: FOO is foo\n',
'bin/chrome/foo/foo.js': 'foo.js\n',
'bin/chrome/foo/qux.js': 'bar.js\n',
'bin/components/bar.js':
'//@line 2 "%sbar.js"\nbar.js: FOO is foo\n' % (test_path),
'bin/components/components.manifest':
'component {foo} foo.js\ncomponent {bar} bar.js\n',
'bin/components/foo.js': 'foo.js\n',
'bin/defaults/pref/prefs.js': 'prefs.js\n',
'bin/foo.ini': 'foo.ini\n',
'bin/modules/baz.jsm':
'//@line 2 "%sbaz.jsm"\nbaz.jsm: FOO is foo\n' % (test_path),
'bin/modules/child/bar.jsm': 'bar.jsm\n',
'bin/modules/child2/qux.jsm':
'//@line 4 "%squx.jsm"\nqux.jsm: BAR is not defined\n'
% (test_path),
'bin/modules/foo.jsm': 'foo.jsm\n',
'bin/res/resource': 'resource\n',
'bin/res/child/resource2': 'resource2\n',
'bin/app/baz.ini': 'baz.ini: FOO is bar\n',
'bin/app/child/bar.ini': 'bar.ini\n',
'bin/app/child2/qux.ini': 'qux.ini: BAR is defined\n',
'bin/app/chrome.manifest':
'manifest chrome/foo.manifest\n'
'manifest components/components.manifest\n',
'bin/app/chrome/foo.manifest':
'content bar foo/child/\n'
'content foo foo/\n'
'override chrome://foo/bar.svg#hello '
'chrome://bar/bar.svg#hello\n',
'bin/app/chrome/foo/bar.js': 'bar.js\n',
'bin/app/chrome/foo/child/baz.jsm':
'//@line 2 "%sbaz.jsm"\nbaz.jsm: FOO is bar\n' % (test_path),
'bin/app/chrome/foo/child/hoge.js':
'//@line 2 "%sbar.js"\nbar.js: FOO is bar\n' % (test_path),
'bin/app/chrome/foo/foo.css': 'foo.css: FOO is bar\n',
'bin/app/chrome/foo/foo.js': 'foo.js\n',
'bin/app/chrome/foo/qux.js': 'bar.js\n',
'bin/app/components/bar.js':
'//@line 2 "%sbar.js"\nbar.js: FOO is bar\n' % (test_path),
'bin/app/components/components.manifest':
'component {foo} foo.js\ncomponent {bar} bar.js\n',
'bin/app/components/foo.js': 'foo.js\n',
'bin/app/defaults/preferences/prefs.js': 'prefs.js\n',
'bin/app/foo.css': 'foo.css: FOO is bar\n',
'bin/app/foo.ini': 'foo.ini\n',
'bin/app/modules/baz.jsm':
'//@line 2 "%sbaz.jsm"\nbaz.jsm: FOO is bar\n' % (test_path),
'bin/app/modules/child/bar.jsm': 'bar.jsm\n',
'bin/app/modules/child2/qux.jsm':
'//@line 2 "%squx.jsm"\nqux.jsm: BAR is defined\n'
% (test_path),
'bin/app/modules/foo.jsm': 'foo.jsm\n',
})
if __name__ == '__main__':
main()
| mpl-2.0 | -1,361,531,391,960,118,500 | 40.268908 | 78 | 0.54551 | false |
Grumbel/rfactortools | race07-ids.py | 1 | 3707 | #!/usr/bin/env python3
# Race07 Tool
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import re
import os
import rfactortools
def show_buildin_ids(args):
if args.class_id:
if args.inverse:
for i in range(0, 256):
                if i not in rfactortools.race07.class_ids:
print(i)
else:
for class_id, name in rfactortools.race07.class_ids.items():
print("%3d: %r" % (class_id, name))
    if args.model_id:
        if args.inverse:
            for i in range(0, 256):
                if i not in rfactortools.race07.model_ids:
                    print(i)
        else:
            for model_id, name in rfactortools.race07.model_ids.items():
                print("%3d: %r" % (model_id, name))
keyvalue_regex = re.compile(r'^\s*([^=]+)\s*=\s*(.*)\s*')
comment_regex = re.compile(r'(.*?)(//.*)')
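# Illustrative example (hypothetical input line): given
#     'ClassID=12 // touring cars'
# comment_regex first splits off the trailing '// touring cars' comment, and
# keyvalue_regex then yields key 'ClassID' and value '12', which read_ids()
# lower-cases and converts with int().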
class Car:
def __init__(self):
self.class_id = None
self.model_id = None
self.classes = None
self.model = None
def read_ids(filename):
with open(filename, 'rt', encoding='latin-1') as fin:
lines = fin.read().splitlines()
car = Car()
for line in lines:
m = comment_regex.match(line)
if m:
# comment = m.group(2)
line = m.group(1)
m = keyvalue_regex.match(line)
if m:
key, value = m.group(1).lower(), m.group(2)
if key == "classid":
car.class_id = int(value)
elif key == "classes":
car.classes = value
elif key == "model":
car.model = value
elif key == "modelid":
car.model_id = int(value)
else:
pass # print("unhandled: \"%s\"" % key)
return car
def main():
parser = argparse.ArgumentParser(description="Race07 Tool")
parser.add_argument('DIRECTORY', action='store', type=str,
help='directory containing .gen and .veh files')
parser.add_argument('-c', '--class-id', action='store_true', default=False,
help='list class ids')
parser.add_argument('-m', '--model-id', action='store_true', default=False,
help='list model ids')
parser.add_argument('-i', '--inverse', action='store_true', default=False,
help='list free ids, instead of occupied ones')
args = parser.parse_args()
if args.DIRECTORY:
for fname in rfactortools.find_files(args.DIRECTORY):
ext = os.path.splitext(fname)[1].lower()
if ext == ".car" or ext == ".inccar":
car = read_ids(fname)
if not car.class_id and not car.model_id:
pass
elif not car.class_id or not car.model_id:
print("%4s %4s %s" % (car.class_id, car.model_id, fname))
else:
print("%4d %4d %s" % (car.class_id, car.model_id, fname))
if __name__ == "__main__":
main()
# EOF #
| gpl-3.0 | -381,973,977,070,208,450 | 31.234783 | 79 | 0.555166 | false |
wronk/mne-python | mne/epochs.py | 1 | 129010 | # -*- coding: utf-8 -*-
"""Tools for working with epoched data"""
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Daniel Strohmeier <[email protected]>
# Denis Engemann <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import json
import os.path as op
from distutils.version import LooseVersion
import numpy as np
import scipy
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_float_matrix, write_float,
write_id, write_string, _get_split_size)
from .io.meas_info import read_meas_info, write_meas_info, _merge_info
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.pick import (pick_types, channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_pick_aux_channels, _DATA_CH_TYPES_SPLIT)
from .io.proj import setup_proj, ProjMixin, _proj_equal
from .io.base import _BaseRaw, ToDataFrameMixin, TimeMixin
from .bem import _check_origin
from .evoked import EvokedArray
from .baseline import rescale, _log_rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import resample, detrend, FilterMixin
from .event import _read_events_fif
from .fixes import in1d, _get_args
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs)
from .utils import (check_fname, logger, verbose, _check_type_picks,
_time_mask, check_random_state, object_hash, warn,
_check_copy_dep)
from .utils import deprecated
from .externals.six import iteritems, string_types
from .externals.six.moves import zip
def _save_split(epochs, fname, part_idx, n_parts):
"""Split epochs"""
# insert index in filename
path, base = op.split(fname)
idx = base.find('.')
if part_idx > 0:
fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
base[idx + 1:]))
next_fname = None
if part_idx < n_parts - 1:
next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
base[idx + 1:]))
next_idx = part_idx + 1
fid = start_file(fname)
info = epochs.info
meas_id = info['meas_id']
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# Write measurement info
write_meas_info(fid, info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_MNE_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = epochs.get_data()
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
mapping_ = ';'.join([k + ':' + str(v) for k, v in
epochs.event_id.items()])
write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# First and last sample
first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe
last = first + len(epochs.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# save baseline
if epochs.baseline is not None:
bmin, bmax = epochs.baseline
bmin = epochs.times[0] if bmin is None else bmin
bmax = epochs.times[-1] if bmax is None else bmax
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(info['nchan'])
for k in range(info['nchan']):
decal[k] = 1.0 / (info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))
write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
epochs.selection)
# And now write the next file info in case epochs are split on disk
if next_fname is not None and n_parts > 1:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
class _BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
ToDataFrameMixin, TimeMixin):
"""Abstract base class for Epochs-type classes
This class provides basic functionality and should never be instantiated
directly. See Epochs below for an explanation of the parameters.
"""
def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), raw=None,
picks=None, name='Unknown', reject=None, flat=None,
decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
add_eeg_ref=True, proj=True, on_missing='error',
preload_at_end=False, selection=None, drop_log=None,
verbose=None):
self.verbose = verbose
self.name = name
if on_missing not in ['error', 'warning', 'ignore']:
raise ValueError('on_missing must be one of: error, '
'warning, ignore. Got: %s' % on_missing)
# check out event_id dict
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
elif isinstance(event_id, dict):
if not all(isinstance(v, int) for v in event_id.values()):
raise ValueError('Event IDs must be of type integer')
if not all(isinstance(k, string_types) for k in event_id):
raise ValueError('Event names must be of type str')
elif isinstance(event_id, list):
if not all(isinstance(v, int) for v in event_id):
raise ValueError('Event IDs must be of type integer')
event_id = dict(zip((str(i) for i in event_id), event_id))
elif isinstance(event_id, int):
event_id = {str(event_id): event_id}
else:
raise ValueError('event_id must be dict or int.')
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
if events.dtype.kind not in ['i', 'u']:
raise ValueError('events must be an array of type int')
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be 2D with 3 columns')
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
if on_missing == 'error':
raise ValueError(msg)
elif on_missing == 'warning':
warn(msg)
else: # on_missing == 'ignore':
pass
values = list(self.event_id.values())
selected = in1d(events[:, 2], values)
if selection is None:
self.selection = np.where(selected)[0]
else:
self.selection = selection
if drop_log is None:
self.drop_log = [list() if k in self.selection else ['IGNORED']
for k in range(len(events))]
else:
self.drop_log = drop_log
events = events[selected]
n_events = len(events)
if n_events > 1:
if np.diff(events.astype(np.int64)[:, 0]).min() <= 0:
warn('The events passed to the Epochs constructor are not '
'chronologically ordered.', RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
self.events = events
del events
else:
self.drop_log = list()
self.selection = np.array([], int)
# do not set self.events here, let subclass do it
# check reject_tmin and reject_tmax
if (reject_tmin is not None) and (reject_tmin < tmin):
raise ValueError("reject_tmin needs to be None or >= tmin")
if (reject_tmax is not None) and (reject_tmax > tmax):
raise ValueError("reject_tmax needs to be None or <= tmax")
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError('reject_tmin needs to be < reject_tmax')
if detrend not in [None, 0, 1]:
raise ValueError('detrend must be None, 0, or 1')
# check that baseline is in available data
if baseline is not None:
baseline_tmin, baseline_tmax = baseline
tstep = 1. / info['sfreq']
if baseline_tmin is not None:
if baseline_tmin < tmin - tstep:
err = ("Baseline interval (tmin = %s) is outside of epoch "
"data (tmin = %s)" % (baseline_tmin, tmin))
raise ValueError(err)
if baseline_tmax is not None:
if baseline_tmax > tmax + tstep:
err = ("Baseline interval (tmax = %s) is outside of epoch "
"data (tmax = %s)" % (baseline_tmax, tmax))
raise ValueError(err)
if tmin > tmax:
raise ValueError('tmin has to be less than or equal to tmax')
_log_rescale(baseline)
self.baseline = baseline
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
self.detrend = detrend
self._raw = raw
self.info = info
del info
if picks is None:
picks = list(range(len(self.info['ch_names'])))
else:
self.info = pick_info(self.info, picks)
self.picks = _check_type_picks(picks)
if len(picks) == 0:
raise ValueError("Picks cannot be empty.")
if data is None:
self.preload = False
self._data = None
else:
assert decim == 1
if data.ndim != 3 or data.shape[2] != \
round((tmax - tmin) * self.info['sfreq']) + 1:
raise RuntimeError('bad data shape')
self.preload = True
self._data = data
self._offset = None
# Handle times
sfreq = float(self.info['sfreq'])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx,
int(round(tmax * sfreq)) + 1) / sfreq
self.times = self._raw_times.copy()
self._decim = 1
self.decimate(decim)
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, 'delayed', False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s'
% (valid_proj, proj))
if proj == 'delayed':
self._do_delayed_proj = True
logger.info('Entering delayed SSP mode.')
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, add_eeg_ref,
activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
def load_data(self):
"""Load the data if not already preloaded
Returns
-------
epochs : instance of Epochs
The epochs object.
Notes
-----
This function operates in-place.
.. versionadded:: 0.10.0
"""
if self.preload:
return
self._data = self._get_data()
self.preload = True
self._decim_slice = slice(None, None, None)
self._decim = 1
self._raw_times = self.times
assert self._data.shape[-1] == len(self.times)
return self
def decimate(self, decim, copy=None, offset=0):
"""Decimate the epochs
Parameters
----------
decim : int
The amount to decimate data.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
offset : int
Apply an offset to where the decimation starts relative to the
sample corresponding to t=0. The offset is in samples at the
current sampling rate.
.. versionadded:: 0.12
Returns
-------
epochs : instance of Epochs
The decimated Epochs object.
Notes
-----
Decimation can be done multiple times. For example,
``epochs.decimate(2).decimate(2)`` will be the same as
``epochs.decimate(4)``.
.. versionadded:: 0.10.0
"""
if decim < 1 or decim != int(decim):
raise ValueError('decim must be an integer > 0')
decim = int(decim)
epochs = _check_copy_dep(self, copy)
del self
new_sfreq = epochs.info['sfreq'] / float(decim)
lowpass = epochs.info['lowpass']
if decim > 1 and lowpass is None:
warn('The measurement information indicates data is not low-pass '
'filtered. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (decim, new_sfreq))
elif decim > 1 and new_sfreq < 2.5 * lowpass:
warn('The measurement information indicates a low-pass frequency '
'of %g Hz. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (lowpass, decim, new_sfreq)) # > 50% nyquist lim
offset = int(offset)
if not 0 <= offset < decim:
            raise ValueError('offset must be at least 0 and less than decim '
                             '(%s), got %s' % (decim, offset))
epochs._decim *= decim
start_idx = int(round(epochs._raw_times[0] * (epochs.info['sfreq'] *
epochs._decim)))
i_start = start_idx % epochs._decim
decim_slice = slice(i_start + offset, len(epochs._raw_times),
epochs._decim)
epochs.info['sfreq'] = new_sfreq
if epochs.preload:
epochs._data = epochs._data[:, :, decim_slice].copy()
epochs._raw_times = epochs._raw_times[decim_slice].copy()
epochs._decim_slice = slice(None, None, None)
epochs._decim = 1
epochs.times = epochs._raw_times
else:
epochs._decim_slice = decim_slice
epochs.times = epochs._raw_times[epochs._decim_slice]
return epochs
@verbose
def apply_baseline(self, baseline, copy=None, verbose=None):
"""Baseline correct epochs
Parameters
----------
baseline : tuple of length 2
            The time interval to apply baseline correction. (a, b) is the
            interval between "a (s)" and "b (s)". If a is None the beginning
            of the data is used and if b is None then b is set to the end of
            the interval. If baseline is equal to (None, None) the entire time
            interval is used.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
epochs : instance of Epochs
The baseline-corrected Epochs object.
Notes
-----
Baseline correction can be done multiple times.
.. versionadded:: 0.10.0
"""
if not isinstance(baseline, tuple) or len(baseline) != 2:
raise ValueError('`baseline=%s` is an invalid argument.'
% str(baseline))
epochs = _check_copy_dep(self, copy)
picks = _pick_data_channels(epochs.info, exclude=[], with_ref_meg=True)
picks_aux = _pick_aux_channels(epochs.info, exclude=[])
picks = np.sort(np.concatenate((picks, picks_aux)))
data = epochs._data
data[:, picks, :] = rescale(data[:, picks, :], self.times, baseline,
copy=False)
epochs.baseline = baseline
return epochs
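    # Illustrative usage sketch (assumes an existing, preloaded `epochs`
    # instance; the interval is an arbitrary example):
    #     epochs.apply_baseline((None, 0))  # mean over [tmin, 0] per channel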
def _reject_setup(self, reject, flat):
"""Sets self._reject_time and self._channel_type_idx"""
idx = channel_indices_by_type(self.info)
reject = deepcopy(reject) if reject is not None else dict()
flat = deepcopy(flat) if flat is not None else dict()
for rej, kind in zip((reject, flat), ('reject', 'flat')):
if not isinstance(rej, dict):
raise TypeError('reject and flat must be dict or None, not %s'
% type(rej))
bads = set(rej.keys()) - set(idx.keys())
if len(bads) > 0:
raise KeyError('Unknown channel types found in %s: %s'
% (kind, bads))
for key in idx.keys():
# don't throw an error if rejection/flat would do nothing
if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
flat.get(key, -1) >= 0):
# This is where we could eventually add e.g.
# self.allow_missing_reject_keys check to allow users to
# provide keys that don't exist in data
raise ValueError("No %s channel found. Cannot reject based on "
"%s." % (key.upper(), key.upper()))
# check for invalid values
for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
for key, val in rej.items():
if val is None or val < 0:
raise ValueError('%s value must be a number >= 0, not "%s"'
% (kind, val))
# now check to see if our rejection and flat are getting more
# restrictive
old_reject = self.reject if self.reject is not None else dict()
old_flat = self.flat if self.flat is not None else dict()
bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
'{kind} values must be at least as stringent as '
'previous ones')
for key in set(reject.keys()).union(old_reject.keys()):
old = old_reject.get(key, np.inf)
new = reject.get(key, np.inf)
if new > old:
raise ValueError(bad_msg.format(kind='reject', key=key,
new=new, old=old, op='>'))
for key in set(flat.keys()).union(old_flat.keys()):
old = old_flat.get(key, -np.inf)
new = flat.get(key, -np.inf)
if new < old:
raise ValueError(bad_msg.format(kind='flat', key=key,
new=new, old=old, op='<'))
# after validation, set parameters
self._bad_dropped = False
self._channel_type_idx = idx
self.reject = reject if len(reject) > 0 else None
self.flat = flat if len(flat) > 0 else None
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
@verbose
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good"""
if isinstance(data, string_types):
return False, [data]
if data is None:
return False, ['NO_DATA']
n_times = len(self.times)
if data.shape[1] < n_times:
            # epoch is too short, i.e., it is at the end of the data
return False, ['TOO_SHORT']
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
@verbose
def _detrend_offset_decim(self, epoch, verbose=None):
"""Aux Function: detrend, baseline correct, offset, decim
Note: operates inplace
"""
if (epoch is None) or isinstance(epoch, string_types):
return epoch
# Detrend
if self.detrend is not None:
picks = _pick_data_channels(self.info, exclude=[])
epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
# Baseline correct
picks = pick_types(self.info, meg=True, eeg=True, stim=False,
ref_meg=True, eog=True, ecg=True, seeg=True,
emg=True, bio=True, ecog=True, exclude=[])
epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
copy=False, verbose=False)
# handle offset
if self._offset is not None:
epoch += self._offset
# Decimate if necessary (i.e., epoch not preloaded)
epoch = epoch[:, self._decim_slice]
return epoch
def iter_evoked(self):
"""Iterate over epochs as a sequence of Evoked objects
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
"""
self._current = 0
while True:
out = self.next(True)
if out is None:
return # properly signal the end of iteration
data, event_id = out
tmin = self.times[0]
info = deepcopy(self.info)
yield EvokedArray(data, info, tmin, comment=str(event_id))
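    # Illustrative usage sketch: each yielded object is a single-trial Evoked,
    # e.g.
    #     for ev in epochs.iter_evoked():
    #         ...  # ev.data has shape (n_channels, n_times)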
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch
Can be used to exclude the evoked response when analyzing induced
activity, see e.g. [1].
References
----------
[1] David et al. "Mechanisms of evoked and induced responses in
MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
Parameters
----------
evoked : instance of Evoked | None
The evoked response to subtract. If None, the evoked response
is computed from Epochs itself.
Returns
-------
self : instance of Epochs
The modified instance (instance is also modified inplace).
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = _pick_data_channels(self.info, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
_DATA_CH_TYPES_SPLIT]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
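    # Illustrative usage sketch (names are examples): to keep the original
    # epochs intact while looking at induced activity only, one might do
    #     epochs_induced = epochs.copy().subtract_evoked()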
def __next__(self, *args, **kwargs):
"""Wrapper for Py3k"""
return self.next(*args, **kwargs)
def __hash__(self):
if not self.preload:
raise RuntimeError('Cannot hash epochs unless preloaded')
return object_hash(dict(info=self.info, data=self._data))
def average(self, picks=None):
"""Compute average of epochs
Parameters
----------
picks : array-like of int | None
            If None, only MEG, EEG, SEEG, and ECoG channels are kept;
            otherwise the channel indices in picks are kept.
Returns
-------
evoked : instance of Evoked
The averaged epochs.
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
"""
return self._compute_mean_or_stderr(picks, 'ave')
def standard_error(self, picks=None):
"""Compute standard error over epochs
Parameters
----------
picks : array-like of int | None
            If None, only MEG, EEG, SEEG, and ECoG channels are kept;
            otherwise the channel indices in picks are kept.
Returns
-------
evoked : instance of Evoked
The standard error over epochs.
"""
return self._compute_mean_or_stderr(picks, 'stderr')
def _compute_mean_or_stderr(self, picks, mode='ave'):
"""Compute the mean or std over epochs and return Evoked"""
_do_std = True if mode == 'stderr' else False
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = np.std if _do_std else np.mean
data = fun(self._data, axis=0)
assert len(self.events) == len(self._data)
else:
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if _do_std:
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if not _do_std:
kind = 'average'
else:
kind = 'standard_error'
data /= np.sqrt(n_events)
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind)
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind):
"""Helper to create an evoked object from epoch data"""
info = deepcopy(info)
evoked = EvokedArray(data, info, tmin=self.times[0],
comment=self.name, nave=n_events, kind=kind,
verbose=self.verbose)
# XXX: above constructor doesn't recreate the times object precisely
evoked.times = self.times.copy()
# pick channels
if picks is None:
picks = _pick_data_channels(evoked.info, exclude=[])
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
def plot(self, picks=None, scalings=None, show=True,
block=False, n_epochs=20,
n_channels=20, title=None):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side
of the main axes. Calling this function drops all the selected bad
epochs as well as bad epochs marked beforehand with rejection
parameters.
Parameters
----------
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | None
Scale factors for the traces. If None, defaults to
``dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)``.
show : bool
Whether to show the figure or not.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on a
sub plot.
n_epochs : int
The number of epochs per view.
n_channels : int
            The number of channels per view. Defaults to 20.
        title : str | None
            The title of the window. If None, the epochs name will be
            displayed. Defaults to None.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can
be used to navigate between channels and epochs and the scaling can be
adjusted with - and + (or =) keys, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use(``TkAgg``) should work).
        Full screen mode can be toggled with the f11 key. The number of epochs
        and channels per view can be adjusted with the home/end and
        page down/page up keys. Butterfly plot can be toggled with the ``b`` key.
Right mouse click adds a vertical line to the plot.
.. versionadded:: 0.10.0
"""
return plot_epochs(self, picks=picks, scalings=scalings,
n_epochs=n_epochs, n_channels=n_channels,
title=title, show=show, block=block)
def plot_psd(self, fmin=0, fmax=np.inf, proj=False, bandwidth=None,
adaptive=False, low_bias=True, normalization='length',
picks=None, ax=None, color='black', area_mode='std',
area_alpha=0.33, dB=True, n_jobs=1, verbose=None, show=True):
"""Plot the power spectral density across epochs
Parameters
----------
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
            The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across
channels) will be plotted. If 'range', the min and max (across
channels) will be plotted. Bad channels will be excluded from
these calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
            Figure with frequency spectra of the data channels.
"""
return plot_epochs_psd(self, fmin=fmin, fmax=fmax, proj=proj,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
picks=picks, ax=ax, color=color,
area_mode=area_mode, area_alpha=area_alpha,
                               dB=dB, n_jobs=n_jobs, verbose=verbose, show=show)
def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, proj=False,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', ch_type=None,
layout=None, cmap='RdBu_r', agg_fun=None, dB=True,
n_jobs=1, normalize=False, cbar_fmt='%0.3f',
outlines='head', show=True, verbose=None):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the
output equals vmax(data). Defaults to None.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted. If None,
            defaults to 'mag' if MEG data are present and to 'eeg' if only EEG
            data are present.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
agg_fun : callable
The function used to aggregate over frequencies.
            Defaults to np.sum if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize
is False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
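        Examples
        --------
        A hedged sketch, not from the original docstring; it assumes
        ``epochs`` contains MEG data and the band limits are illustrative:
        >>> epochs.plot_psd_topomap(bands=[(8, 12, 'Alpha')],
        ...                         ch_type='mag')  # doctest: +SKIP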
"""
return plot_epochs_psd_topomap(
self, bands=bands, vmin=vmin, vmax=vmax, proj=proj,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
ch_type=ch_type, layout=layout, cmap=cmap,
agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, show=show, verbose=None)
def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', font_color='w',
show=True):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
layout: instance of Layout
System specific sensor positions.
sigma : float
            The standard deviation of the Gaussian smoothing to apply along
            the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
layout_scale: float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_topo_image_epochs(
self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
colorbar=colorbar, order=order, cmap=cmap,
layout_scale=layout_scale, title=title, scalings=scalings,
border=border, fig_facecolor=fig_facecolor, font_color=font_color,
show=show)
@deprecated('drop_bad_epochs method has been renamed drop_bad. '
'drop_bad_epochs method will be removed in 0.13')
def drop_bad_epochs(self, reject='existing', flat='existing'):
"""Drop bad epochs without retaining the epochs data"""
return self.drop_bad(reject, flat)
@verbose
def drop_bad(self, reject='existing', flat='existing', verbose=None):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. warning:: This operation is slow since all epochs have to be read
from disk. To avoid reading epochs from disk multiple
times, use :func:`mne.Epochs.load_data()`.
Parameters
----------
reject : dict | str | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. If 'existing',
then the rejection parameters set at instantiation are used.
flat : dict | str | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done. If 'existing',
then the flat parameters set at instantiation are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The epochs with bad epochs dropped. Operates in-place.
Notes
-----
Dropping bad epochs can be done multiple times with different
``reject`` and ``flat`` parameters. However, once an epoch is
dropped, it is dropped forever, so if more lenient thresholds may
subsequently be applied, `epochs.copy` should be used.
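        Examples
        --------
        A hedged sketch, not from the original docstring; it assumes
        ``epochs`` is an existing Epochs instance and that a 100 uV EEG
        threshold suits the data:
        >>> clean = epochs.copy()  # doctest: +SKIP
        >>> clean.drop_bad(reject=dict(eeg=100e-6), flat=None)  # doctest: +SKIP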
"""
if reject == 'existing':
if flat == 'existing' and self._bad_dropped:
return
reject = self.reject
if flat == 'existing':
flat = self.flat
if any(isinstance(rej, string_types) and rej != 'existing' for
rej in (reject, flat)):
raise ValueError('reject and flat, if strings, must be "existing"')
self._reject_setup(reject, flat)
self._get_data(out=False)
return self
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
perc : float
Total percentage of epochs dropped.
fig : Instance of matplotlib.figure.Figure
The figure.
"""
if not self._bad_dropped:
raise ValueError("You cannot use plot_drop_log since bad "
"epochs have not yet been dropped. "
"Use epochs.drop_bad().")
from .viz import plot_drop_log
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore,
show=show)
def plot_image(self, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r',
fig=None, overlay_times=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
            The standard deviation of the Gaussian smoothing to apply along
            the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
            (data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
            The units of the channel types used for axes labels. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two
axes for drawing the single trials and evoked responses. If
None a new figure is created. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in
seconds and is added to the image. It is typically useful to
display reaction times. Note that it is defined with respect
to the order of epochs such that overlay_times[0] corresponds
to epochs[0].
Returns
-------
figs : list of matplotlib figures
One figure per channel displayed.
"""
return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
vmax=vmax, colorbar=colorbar, order=order,
show=show, units=units, scalings=scalings,
cmap=cmap, fig=fig,
overlay_times=overlay_times)
@deprecated('drop_epochs method has been renamed drop. '
'drop_epochs method will be removed in 0.13')
def drop_epochs(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask"""
return self.drop(indices, reason, verbose)
@verbose
def drop(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask
.. note:: The indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any
external indices (e.g., behavioral logs). To drop epochs
based on external criteria, do not use the ``preload=True``
flag when constructing an Epochs object, and call this
method before calling the :func:`mne.Epochs.drop_bad` or
:func:`mne.Epochs.load_data` methods.
Parameters
----------
indices : array of ints or bools
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The epochs with indices dropped. Operates in-place.
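        Examples
        --------
        A hedged sketch assuming ``epochs`` already exists; the indices and
        reason are illustrative only:
        >>> epochs.drop([0, 3], reason='blink')  # doctest: +SKIP
        >>> mask = np.arange(len(epochs)) % 2 == 1  # doctest: +SKIP
        >>> epochs.drop(mask, reason='odd trial')  # doctest: +SKIP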
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
out_of_bounds = (indices < 0) | (indices >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
for ii in indices:
self.drop_log[self.selection[ii]].append(reason)
self.selection = np.delete(self.selection, indices)
self.events = np.delete(self.events, indices, axis=0)
if self.preload:
self._data = np.delete(self._data, indices, axis=0)
count = len(indices)
logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
return self
def _get_epoch_from_raw(self, idx, verbose=None):
"""Method to get a given epoch from disk"""
raise NotImplementedError
def _project_epoch(self, epoch):
"""Helper to process a raw epoch based on the delayed param"""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, string_types):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
@verbose
def _get_data(self, out=True, verbose=None):
"""Load all data, dropping bad epochs along the way
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
n_events = len(self.events)
# in case there are no good events
if self.preload:
# we will store our result in our existing array
data = self._data
else:
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info['ch_names']), len(self.times)))
logger.info('Loading data for %s events and %s original time '
'points ...' % (n_events, len(self._raw_times)))
if self._bad_dropped:
if not out:
return
if self.preload:
return data
# we need to load from disk, drop, and return data
for idx in range(n_events):
# faster to pre-allocate memory here
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
if self._do_delayed_proj:
epoch_out = epoch_noproj
else:
epoch_out = self._project_epoch(epoch_noproj)
if idx == 0:
data = np.empty((n_events, len(self.ch_names),
len(self.times)), dtype=epoch_out.dtype)
data[idx] = epoch_out
else:
# bads need to be dropped, this might occur after a preload
# e.g., when calling drop_bad w/new params
good_idx = []
n_out = 0
assert n_events == len(self.selection)
for idx, sel in enumerate(self.selection):
if self.preload: # from memory
if self._do_delayed_proj:
epoch_noproj = self._data[idx]
epoch = self._project_epoch(epoch_noproj)
else:
epoch_noproj = None
epoch = self._data[idx]
else: # from disk
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
epoch_out = epoch_noproj if self._do_delayed_proj else epoch
is_good, offending_reason = self._is_good_epoch(epoch)
if not is_good:
self.drop_log[sel] += offending_reason
continue
good_idx.append(idx)
# store the epoch if there is a reason to (output or update)
if out or self.preload:
# faster to pre-allocate, then trim as necessary
if n_out == 0 and not self.preload:
data = np.empty((n_events, epoch_out.shape[0],
epoch_out.shape[1]),
dtype=epoch_out.dtype, order='C')
data[n_out] = epoch_out
n_out += 1
self._bad_dropped = True
logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
# Now update our properties
if len(good_idx) == 0: # silly fix for old numpy index error
self.selection = np.array([], int)
self.events = np.empty((0, 3))
else:
self.selection = self.selection[good_idx]
self.events = np.atleast_2d(self.events[good_idx])
# adjust the data size if there is a reason to (output or update)
if out or self.preload:
data.resize((n_out,) + data.shape[1:], refcheck=False)
return data if out else None
def get_data(self):
"""Get all epochs as a 3D array
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A copy of the epochs data.
"""
return self._get_data()
def __len__(self):
"""Number of epochs.
"""
if not self._bad_dropped:
raise RuntimeError('Since bad epochs have not been dropped, the '
'length of the Epochs is not known. Load the '
'Epochs with preload=True, or call '
'Epochs.drop_bad(). To find the number '
'of events in the Epochs, use '
'len(Epochs.events).')
return len(self.events)
def __iter__(self):
"""To make iteration over epochs easy.
"""
self._current = 0
while True:
x = self.next()
if x is None:
return
yield x
def next(self, return_event_id=False):
"""Iterate over epoch data.
Parameters
----------
return_event_id : bool
If True, return both the epoch data and an event_id.
Returns
-------
epoch : array of shape (n_channels, n_times)
The epoch data.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
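        Examples
        --------
        ``next`` is normally driven implicitly by iteration; a hedged sketch
        assuming ``epochs`` already exists:
        >>> for data in epochs:  # doctest: +SKIP
        ...     print(data.shape)  # doctest: +SKIP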
"""
if self.preload:
if self._current >= len(self._data):
return # signal the end
epoch = self._data[self._current]
self._current += 1
else:
is_good = False
while not is_good:
if self._current >= len(self.events):
return # signal the end properly
epoch_noproj = self._get_epoch_from_raw(self._current)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._do_delayed_proj:
epoch = epoch_noproj
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
@property
def tmin(self):
return self.times[0]
@property
def tmax(self):
return self.times[-1]
def __repr__(self):
""" Build string representation
"""
s = 'n_events : %s ' % len(self.events)
s += '(all good)' if self._bad_dropped else '(good & bad)'
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
if len(self.event_id) > 1:
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in sorted(self.event_id.items())]
s += ',\n %s' % ', '.join(counts)
class_name = self.__class__.__name__
if class_name == '_BaseEpochs':
class_name = 'Epochs'
return '<%s | %s>' % (class_name, s)
def _key_match(self, key):
"""Helper function for event dict use"""
if key not in self.event_id:
raise KeyError('Event "%s" is not in Epochs.' % key)
return self.events[:, 2] == self.event_id[key]
def __getitem__(self, key):
"""Return an Epochs object with a subset of epochs
"""
data = self._data
del self._data
epochs = self.copy()
self._data, epochs._data = data, data
del self
if isinstance(key, string_types):
key = [key]
if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
if any('/' in k_i for k_i in epochs.event_id.keys()):
if any(k_e not in epochs.event_id for k_e in key):
# Select a given key if the requested set of
# '/'-separated types are a subset of the types in that key
key = [k for k in epochs.event_id.keys()
if all(set(k_i.split('/')).issubset(k.split('/'))
for k_i in key)]
if len(key) == 0:
raise KeyError('Attempting selection of events via '
'multiple/partial matching, but no '
'event matches all criteria.')
select = np.any(np.atleast_2d([epochs._key_match(k)
for k in key]), axis=0)
epochs.name = '+'.join(key)
else:
select = key if isinstance(key, slice) else np.atleast_1d(key)
key_selection = epochs.selection[select]
for k in np.setdiff1d(epochs.selection, key_selection):
epochs.drop_log[k] = ['IGNORED']
epochs.selection = key_selection
epochs.events = np.atleast_2d(epochs.events[select])
if epochs.preload:
# ensure that each Epochs instance owns its own data so we can
# resize later if necessary
epochs._data = np.require(epochs._data[select], requirements=['O'])
# update event id to reflect new content of epochs
epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
if v in epochs.events[:, 2])
return epochs
def crop(self, tmin=None, tmax=None, copy=None):
"""Crops a time interval from epochs object.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
epochs : instance of Epochs
The cropped epochs.
Notes
-----
Unlike Python slices, MNE time intervals include both their end points;
crop(tmin, tmax) returns the interval tmin <= t <= tmax.
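        Examples
        --------
        A hedged sketch assuming preloaded ``epochs`` spanning -0.2 to 0.5 s;
        the window below is illustrative only:
        >>> epochs.crop(tmin=0., tmax=0.3)  # doctest: +SKIP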
"""
# XXX this could be made to work on non-preloaded data...
if not self.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in epochs time interval. tmin is set to '
'epochs.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in epochs time interval. tmax is set to '
'epochs.tmax')
tmax = self.tmax
tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'])
this_epochs = _check_copy_dep(self, copy)
this_epochs.times = this_epochs.times[tmask]
this_epochs._raw_times = this_epochs._raw_times[tmask]
this_epochs._data = this_epochs._data[:, :, tmask]
return this_epochs
@verbose
def resample(self, sfreq, npad=None, window='boxcar', n_jobs=1,
copy=None, verbose=None):
"""Resample preloaded data
Parameters
----------
sfreq : float
New sample rate to use
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The resampled epochs object.
See Also
--------
mne.Epochs.savgol_filter
mne.io.Raw.resample
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
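        Examples
        --------
        A hedged sketch assuming preloaded ``epochs``; the target rate and
        padding are illustrative only:
        >>> epochs.resample(100., npad='auto')  # doctest: +SKIP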
"""
# XXX this could operate on non-preloaded data, too
if not self.preload:
raise RuntimeError('Can only resample preloaded data')
if npad is None:
npad = 100
warn('npad is currently taken to be 100, but will be changed to '
'"auto" in 0.13. Please set the value explicitly.',
DeprecationWarning)
inst = _check_copy_dep(self, copy)
o_sfreq = inst.info['sfreq']
inst._data = resample(inst._data, sfreq, o_sfreq, npad, window=window,
n_jobs=n_jobs)
# adjust indirectly affected variables
inst.info['sfreq'] = float(sfreq)
inst.times = (np.arange(inst._data.shape[2], dtype=np.float) /
sfreq + inst.times[0])
return inst
def copy(self):
"""Return copy of Epochs instance"""
raw = self._raw
del self._raw
new = deepcopy(self)
self._raw = raw
new._raw = raw
return new
def save(self, fname, split_size='2GB'):
"""Save epochs in a fif file
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or
-epo.fif.gz.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
.. versionadded:: 0.10.0
Notes
-----
Bad epochs will be dropped before saving the epochs to disk.
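        Examples
        --------
        A hedged sketch; the file name below is hypothetical and must end
        with -epo.fif or -epo.fif.gz:
        >>> epochs.save('sample-epo.fif')  # doctest: +SKIP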
"""
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
split_size = _get_split_size(split_size)
# to know the length accurately. The get_data() call would drop
# bad epochs anyway
self.drop_bad()
total_size = self[0].get_data().nbytes * len(self)
n_parts = int(np.ceil(total_size / float(split_size)))
epoch_idxs = np.array_split(np.arange(len(self)), n_parts)
for part_idx, epoch_idx in enumerate(epoch_idxs):
this_epochs = self[epoch_idx] if n_parts > 1 else self
# avoid missing event_ids in splits
this_epochs.event_id = self.event_id
_save_split(this_epochs, fname, part_idx, n_parts)
def equalize_event_counts(self, event_ids, method='mintime', copy=None):
"""Equalize the number of trials in each condition
        It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
in the two sets of epochs. For example, if one had event times
[1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
it would remove events at times [1, 2] in the first epochs and not
        [120, 121].
Parameters
----------
event_ids : list
The event types to equalize. Each entry in the list can either be
a str (single event) or a list of str. In the case where one of
the entries is a list of str, event_ids in that list will be
grouped together before equalizing trial counts across conditions.
In the case where partial matching is used (using '/' in
`event_ids`), `event_ids` will be matched according to the
provided tags, that is, processing works as if the event_ids
matched by the provided tags had been supplied instead.
The event_ids must identify nonoverlapping subsets of the epochs.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list
will be minimized.
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
        For example (if epochs.event_id was {'Left': 1, 'Right': 2,
        'Nonspatial': 3}):
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the 'Nonspatial' condition with
the total number of trials in the 'Left' and 'Right' conditions.
If multiple indices are provided (e.g. 'Left' and 'Right' in the
example above), it is not guaranteed that after equalization, the
conditions will contribute evenly. E.g., it is possible to end up
with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'.
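        Examples
        --------
        A hedged sketch assuming ``epochs`` has 'Left', 'Right' and
        'Nonspatial' conditions:
        >>> epochs, dropped = epochs.equalize_event_counts(
        ...     [['Left', 'Right'], 'Nonspatial'])  # doctest: +SKIP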
"""
epochs = _check_copy_dep(self, copy, default=True)
if len(event_ids) == 0:
raise ValueError('event_ids must have at least one element')
if not epochs._bad_dropped:
epochs.drop_bad()
# figure out how to equalize
eq_inds = list()
# deal with hierarchical tags
ids = epochs.event_id
orig_ids = list(event_ids)
tagging = False
if "/" in "".join(ids):
# make string inputs a list of length 1
event_ids = [[x] if isinstance(x, string_types) else x
for x in event_ids]
for ids_ in event_ids: # check if tagging is attempted
if any([id_ not in ids for id_ in ids_]):
tagging = True
# 1. treat everything that's not in event_id as a tag
# 2a. for tags, find all the event_ids matched by the tags
# 2b. for non-tag ids, just pass them directly
# 3. do this for every input
event_ids = [[k for k in ids if all((tag in k.split("/")
for tag in id_))] # find ids matching all tags
if all(id__ not in ids for id__ in id_)
else id_ # straight pass for non-tag inputs
for id_ in event_ids]
for ii, id_ in enumerate(event_ids):
if len(id_) == 0:
                raise KeyError(orig_ids[ii] + " not found in the "
                               "epoch object's event_id.")
elif len(set([sub_id in ids for sub_id in id_])) != 1:
err = ("Don't mix hierarchical and regular event_ids"
" like in \'%s\'." % ", ".join(id_))
raise ValueError(err)
# raise for non-orthogonal tags
if tagging is True:
events_ = [set(epochs[x].events[:, 0]) for x in event_ids]
doubles = events_[0].intersection(events_[1])
if len(doubles):
raise ValueError("The two sets of epochs are "
"overlapping. Provide an "
"orthogonal selection.")
for eq in event_ids:
eq = np.atleast_1d(eq)
# eq is now a list of types
key_match = np.zeros(epochs.events.shape[0])
for key in eq:
key_match = np.logical_or(key_match, epochs._key_match(key))
eq_inds.append(np.where(key_match)[0])
event_times = [epochs.events[e, 0] for e in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
epochs.drop(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return epochs, indices
def _drop_log_stats(drop_log, ignore=('IGNORED',)):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
class Epochs(_BaseEpochs):
"""Epochs extracted from a Raw instance
Parameters
----------
raw : Raw object
An instance of Raw.
events : array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used and a dict
        is created with string integer names corresponding to the event
        id integers.
tmin : float
Start time before event. If nothing is provided, defaults to -0.2
tmax : float
End time after event. If nothing is provided, defaults to 0.5
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels are used).
name : string
Comment that describes the Epochs data created.
preload : boolean
Load all epochs from disk when creating the object
or wait before accessing each epoch (more memory
efficient but can be slower).
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
decim : int
Factor by which to downsample the data from the raw file upon import.
Warning: This simply selects every nth sample, data is not filtered
here. If data is not properly filtered, aliasing artifacts may occur.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
on_missing : str
What to do if one or several event ids are not found in the recording.
Valid keys are 'error' | 'warning' | 'ignore'
Default is 'error'. If on_missing is 'warning' it will proceed but
        warn, if 'ignore' it will proceed silently. Note that if none of the
        event ids are found in the data, an error will be automatically
        generated irrespective of this parameter.
reject_by_annotation : bool
Whether to reject based on annotations. If True (default), epochs
overlapping with segments whose description begins with ``'bad'`` are
rejected. If False, no rejection based on annotations is performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Attributes
----------
info: dict
Measurement info.
event_id : dict
Names of conditions corresponding to event_ids.
ch_names : list of string
List of channel names.
selection : array
List of indices of selected events (not dropped or ignored etc.). For
example, if the original event array had 4 events and the second event
has been dropped, this attribute would be np.array([0, 2, 3]).
preload : bool
Indicates whether epochs are in memory.
drop_log : list of lists
A list of the same length as the event array used to initialize the
Epochs object. If the i-th original event is still part of the
selection, drop_log[i] will be an empty list; otherwise it will be
        a list of the reasons the event is no longer in the selection, e.g.:
'IGNORED' if it isn't part of the current subset defined by the user;
'NO_DATA' or 'TOO_SHORT' if epoch didn't contain enough data;
names of channels that exceeded the amplitude threshold;
        'EQUALIZED_COUNT' (see equalize_event_counts);
or 'USER' for user-defined reasons (see drop method).
verbose : bool, str, int, or None
See above.
Notes
-----
When accessing data, Epochs are detrended, baseline-corrected, and
decimated, then projectors are (optionally) applied.
For indexing and slicing:
epochs[idx] : Epochs
Return Epochs object with a subset of epochs (supports single
index and python-style slicing)
For subset selection using categorial labels:
epochs['name'] : Epochs
Return Epochs object with a subset of epochs corresponding to an
experimental condition as specified by 'name'.
If conditions are tagged by names separated by '/' (e.g. 'audio/left',
'audio/right'), and 'name' is not in itself an event key, this selects
every event whose condition contains the 'name' tag (e.g., 'left'
matches 'audio/left' and 'visual/left'; but not 'audio_left'). Note
that tags like 'auditory/left' and 'left/auditory' will be treated the
same way when accessed using tags.
epochs[['name_1', 'name_2', ... ]] : Epochs
Return Epochs object with a subset of epochs corresponding to multiple
experimental conditions as specified by 'name_1', 'name_2', ... .
If conditions are separated by '/', selects every item containing every
list tag (e.g. ['audio', 'left'] selects 'audio/left' and
'audio/center/left', but not 'audio/right').
See Also
--------
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
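    Examples
    --------
    A hedged sketch, not part of the original docstring; the file name and
    event dict are hypothetical:
    >>> raw = mne.io.read_raw_fif('sample_raw.fif')  # doctest: +SKIP
    >>> events = mne.find_events(raw)  # doctest: +SKIP
    >>> epochs = mne.Epochs(raw, events, dict(auditory=1, visual=3),
    ...                     tmin=-0.2, tmax=0.5, baseline=(None, 0),
    ...                     preload=True)  # doctest: +SKIP
    >>> left = epochs['auditory']  # doctest: +SKIP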
"""
@verbose
def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), picks=None, name='Unknown', preload=False,
reject=None, flat=None, proj=True, decim=1, reject_tmin=None,
reject_tmax=None, detrend=None, add_eeg_ref=True,
on_missing='error', reject_by_annotation=True, verbose=None):
if not isinstance(raw, _BaseRaw):
raise ValueError('The first argument to `Epochs` must be an '
'instance of `mne.io.Raw`')
info = deepcopy(raw.info)
# proj is on when applied in Raw
proj = proj or raw.proj
self.reject_by_annotation = reject_by_annotation
# call _BaseEpochs constructor
super(Epochs, self).__init__(
info, None, events, event_id, tmin, tmax, baseline=baseline,
raw=raw, picks=picks, name=name, reject=reject, flat=flat,
decim=decim, reject_tmin=reject_tmin, reject_tmax=reject_tmax,
detrend=detrend, add_eeg_ref=add_eeg_ref, proj=proj,
on_missing=on_missing, preload_at_end=preload, verbose=verbose)
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk
Returns
-------
data : array | str | None
If string it's details on rejection reason.
If None it means no data.
"""
if self._raw is None:
# This should never happen, as raw=None only if preload=True
raise ValueError('An error has occurred, no valid raw file found.'
' Please report this to the mne-python '
'developers.')
sfreq = self._raw.info['sfreq']
event_samp = self.events[idx, 0]
# Read a data segment
first_samp = self._raw.first_samp
start = int(round(event_samp + self.tmin * sfreq)) - first_samp
stop = start + len(self._raw_times)
data = self._raw._check_bad_segment(start, stop, self.picks,
self.reject_by_annotation)
return data
class EpochsArray(_BaseEpochs):
"""Epochs object from numpy array
Parameters
----------
data : array, shape (n_epochs, n_channels, n_times)
The channels' time series for each epoch.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
events : None | array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
If None (default), all event values are set to 1 and event time-samples
are set to range(n_epochs).
tmin : float
Start time before event. If nothing provided, defaults to -0.2.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used and a dict
        is created with string integer names corresponding to the event
        id integers.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
baseline : None or tuple of length 2 (default: None)
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
proj : bool | 'delayed'
Apply SSP projection vectors. See :class:`mne.Epochs` for details.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
io.RawArray, EvokedArray, create_info
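    Examples
    --------
    A hedged sketch; the array shape, channel names and sampling rate are
    hypothetical:
    >>> data = np.random.randn(10, 2, 100)  # doctest: +SKIP
    >>> info = mne.create_info(['EEG 001', 'EEG 002'], sfreq=200.,
    ...                        ch_types='eeg')  # doctest: +SKIP
    >>> epochs = mne.EpochsArray(data, info, tmin=-0.1)  # doctest: +SKIP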
"""
@verbose
def __init__(self, data, info, events=None, tmin=0, event_id=None,
reject=None, flat=None, reject_tmin=None,
reject_tmax=None, baseline=None, proj=True, verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 3:
raise ValueError('Data must be a 3D array of shape (n_epochs, '
'n_channels, n_samples)')
if len(info['ch_names']) != data.shape[1]:
raise ValueError('Info and data must have same number of '
'channels.')
if events is None:
n_epochs = len(data)
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
if data.shape[0] != len(events):
            raise ValueError('The number of epochs and the number of events '
                             'must match')
info = deepcopy(info) # do not modify original info
tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
super(EpochsArray, self).__init__(info, data, events, event_id, tmin,
tmax, baseline, reject=reject,
flat=flat, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, decim=1,
add_eeg_ref=False, proj=proj)
if len(events) != in1d(self.events[:, 2],
list(self.event_id.values())).sum():
raise ValueError('The events must only contain event numbers from '
'event_id')
for ii, e in enumerate(self._data):
# This is safe without assignment b/c there is no decim
self._detrend_offset_decim(e)
self.drop_bad()
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
"""Collapse event_ids from an epochs instance into a new event_id
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
old_event_ids : str, or list
Conditions to collapse together.
new_event_id : dict, or int
A one-element dict (or a single integer) for the new
condition. Note that for safety, this cannot be any
existing id (in epochs.event_id.values()).
copy : bool
Whether to return a new instance or modify in place.
Notes
-----
    For example (if epochs.event_id was {'Left': 1, 'Right': 2}):
combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
would create a 'Directional' entry in epochs.event_id replacing
'Left' and 'Right' (combining their trials).
"""
epochs = epochs.copy() if copy else epochs
old_event_ids = np.asanyarray(old_event_ids)
if isinstance(new_event_id, int):
new_event_id = {str(new_event_id): new_event_id}
else:
if not isinstance(new_event_id, dict):
raise ValueError('new_event_id must be a dict or int')
if not len(list(new_event_id.keys())) == 1:
raise ValueError('new_event_id dict must have one entry')
new_event_num = list(new_event_id.values())[0]
if not isinstance(new_event_num, int):
raise ValueError('new_event_id value must be an integer')
if new_event_num in epochs.event_id.values():
raise ValueError('new_event_id value must not already exist')
    # could use .pop() here, but if a later one doesn't exist, we're
# in trouble, so run them all here and pop() later
old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
# find the ones to replace
inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
old_event_nums[np.newaxis, :], axis=1)
# replace the event numbers in the events list
epochs.events[inds, 2] = new_event_num
# delete old entries
for key in old_event_ids:
epochs.event_id.pop(key)
# add the new entry
epochs.event_id.update(new_event_id)
return epochs
def equalize_epoch_counts(epochs_list, method='mintime'):
"""Equalize the number of trials in multiple Epoch instances
    It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be some
time-varying (like on the scale of minutes) noise characteristics during
a recording, they could be compensated for (to some extent) in the
equalization process. This method thus seeks to reduce any of those effects
by minimizing the differences in the times of the events in the two sets of
epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
[1, 2] in the first epochs and not [120, 121].
Note that this operates on the Epochs instances in-place.
Example:
        equalize_epoch_counts([epochs1, epochs2])
Parameters
----------
epochs_list : list of Epochs instances
The Epochs instances to equalize trial counts for.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will be
minimized.
"""
if not all(isinstance(e, _BaseEpochs) for e in epochs_list):
raise ValueError('All inputs must be Epochs instances')
# make sure bad epochs are dropped
for e in epochs_list:
if not e._bad_dropped:
e.drop_bad()
event_times = [e.events[:, 0] for e in epochs_list]
indices = _get_drop_indices(event_times, method)
for e, inds in zip(epochs_list, indices):
e.drop(inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
"""Helper to get indices to drop from multiple event timing lists"""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
if method not in ['mintime', 'truncate']:
raise ValueError('method must be either mintime or truncate, not '
'%s' % method)
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _fix_fill(fill):
"""Helper to fix bug on old scipy"""
if LooseVersion(scipy.__version__) < LooseVersion('0.12'):
fill = fill[:, np.newaxis]
return fill
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences"""
from scipy.interpolate import interp1d
keep = np.ones((len(t_longer)), dtype=bool)
scores = np.ones((len(t_longer)))
x1 = np.arange(len(t_shorter))
# The first set of keep masks to test
kwargs = dict(copy=False, bounds_error=False)
# this is a speed tweak, only exists for certain versions of scipy
if 'assume_sorted' in _get_args(interp1d.__init__):
kwargs['assume_sorted'] = True
shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
**kwargs)
for ii in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# set up the keep masks to test, eliminating any rows that are already
# gone
keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
keep_mask[:, ~keep] = False
# Check every possible removal to see if it minimizes
x2 = np.arange(len(t_longer) - ii - 1)
t_keeps = np.array([t_longer[km] for km in keep_mask])
longer_interp = interp1d(x2, t_keeps, axis=1,
fill_value=_fix_fill(t_keeps[:, -1]),
**kwargs)
d1 = longer_interp(x1) - t_shorter
d2 = shorter_interp(x2) - t_keeps
scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
keep[np.argmin(scores)] = False
return keep
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
ignore_chs=[], verbose=None):
"""Test if data segment e is good according to the criteria
defined in reject and flat. If full_report=True, it will give
True/False as well as a list of all offending channels.
"""
bad_list = list()
has_printed = False
checkable = np.ones(len(ch_names), dtype=bool)
checkable[np.array([c in ignore_chs
for c in ch_names], dtype=bool)] = False
for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
if refl is not None:
for key, thresh in iteritems(refl):
idx = channel_type_idx[key]
name = key.upper()
if len(idx) > 0:
e_idx = e[idx]
deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
checkable_idx = checkable[idx]
idx_deltas = np.where(np.logical_and(f(deltas, thresh),
checkable_idx))[0]
if len(idx_deltas) > 0:
ch_name = [ch_names[idx[i]] for i in idx_deltas]
if (not has_printed):
logger.info(' Rejecting %s epoch based on %s : '
'%s' % (t, name, ch_name))
has_printed = True
if not full_report:
return False
else:
bad_list.extend(ch_name)
if not full_report:
return True
else:
if bad_list == []:
return True, None
else:
return False, bad_list
def _read_one_epoch_file(f, tree, fname, preload):
"""Helper to read a single FIF file"""
with f as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
info['filename'] = fname
events, mappings = _read_events_fif(fid, tree)
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
raise ValueError('Could not find processed data')
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
# before version 0.11 we errantly saved with this tag instead of
# an MNE tag
            epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
if len(epochs_node) == 0:
epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11
if len(epochs_node) == 0:
raise ValueError('Could not find epochs data')
my_epochs = epochs_node[0]
# Now find the data in the block
name = None
data = None
data_tag = None
bmin, bmax = None, None
baseline = None
selection = None
drop_log = None
for k in range(my_epochs['nent']):
kind = my_epochs['directory'][k].kind
pos = my_epochs['directory'][k].pos
if kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
name = tag.data
elif kind == FIFF.FIFF_EPOCH:
# delay reading until later
fid.seek(pos, 0)
data_tag = read_tag_info(fid)
data_tag.pos = pos
elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]:
# Constant 304 was used before v0.11
tag = read_tag(fid, pos)
bmin = float(tag.data)
elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]:
# Constant 305 was used before v0.11
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = json.loads(tag.data)
if bmin is not None or bmax is not None:
baseline = (bmin, bmax)
n_samp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms (%s)'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq'], name))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Inspect the data
if data_tag is None:
raise ValueError('Epochs data not found')
epoch_shape = (len(info['ch_names']), n_samp)
expected = len(events) * np.prod(epoch_shape)
if data_tag.size // 4 - 4 != expected: # 32-bit floats stored
raise ValueError('Incorrect number of samples (%d instead of %d)'
% (data_tag.size // 4, expected))
# Calibration factors
cals = np.array([[info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)]
for k in range(info['nchan'])], np.float64)
# Read the data
if preload:
data = read_tag(fid, data_tag.pos).data.astype(np.float64)
data *= cals[np.newaxis, :, :]
# Put it all together
tmin = first / info['sfreq']
tmax = last / info['sfreq']
event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
if mappings is None else mappings)
# In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = np.arange(len(events))
if drop_log is None:
            drop_log = [[] for _ in range(len(events))]
return (info, data, data_tag, events, event_id, tmin, tmax, baseline, name,
selection, drop_log, epoch_shape, cals)
@verbose
def read_epochs(fname, proj=True, add_eeg_ref=False, preload=True,
verbose=None):
"""Read epochs from a fif file
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or -epo.fif.gz.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Returns
-------
epochs : instance of Epochs
The epochs
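    Examples
    --------
    A hedged sketch; the file name below is hypothetical:
    >>> epochs = mne.read_epochs('sample-epo.fif', preload=False)  # doctest: +SKIP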
"""
return EpochsFIF(fname, proj, add_eeg_ref, preload, verbose)
class _RawContainer(object):
def __init__(self, fid, data_tag, event_samps, epoch_shape, cals):
self.fid = fid
self.data_tag = data_tag
self.event_samps = event_samps
self.epoch_shape = epoch_shape
self.cals = cals
self.proj = False
def __del__(self):
self.fid.close()
class EpochsFIF(_BaseEpochs):
"""Epochs read from disk
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or -epo.fif.gz.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
mne.Epochs
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
"""
@verbose
def __init__(self, fname, proj=True, add_eeg_ref=True, preload=True,
verbose=None):
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
fnames = [fname]
ep_list = list()
raw = list()
for fname in fnames:
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
next_fname = _get_next_fname(fid, fname, tree)
(info, data, data_tag, events, event_id, tmin, tmax, baseline,
name, selection, drop_log, epoch_shape, cals) = \
_read_one_epoch_file(fid, tree, fname, preload)
# here we ignore missing events, since users should already be
# aware of missing events if they have saved data that way
epoch = _BaseEpochs(
info, data, events, event_id, tmin, tmax, baseline,
on_missing='ignore', selection=selection, drop_log=drop_log,
add_eeg_ref=False, proj=False, verbose=False)
ep_list.append(epoch)
if not preload:
# store everything we need to index back to the original data
raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
events[:, 0].copy(), epoch_shape,
cals))
if next_fname is not None:
fnames.append(next_fname)
(info, data, events, event_id, tmin, tmax, baseline, selection,
drop_log, _) = _concatenate_epochs(ep_list, with_data=preload)
# we need this uniqueness for non-preloaded data to work properly
if len(np.unique(events[:, 0])) != len(events):
raise RuntimeError('Event time samples were not unique')
# correct the drop log
assert len(drop_log) % len(fnames) == 0
step = len(drop_log) // len(fnames)
offsets = np.arange(step, len(drop_log) + 1, step)
for i1, i2 in zip(offsets[:-1], offsets[1:]):
other_log = drop_log[i1:i2]
for k, (a, b) in enumerate(zip(drop_log, other_log)):
if a == ['IGNORED'] and b != ['IGNORED']:
drop_log[k] = b
drop_log = drop_log[:step]
# call _BaseEpochs constructor
super(EpochsFIF, self).__init__(
info, data, events, event_id, tmin, tmax, baseline, raw=raw,
name=name, proj=proj, add_eeg_ref=add_eeg_ref,
preload_at_end=False, on_missing='ignore', selection=selection,
drop_log=drop_log, verbose=verbose)
# use the private property instead of drop_bad so that epochs
# are not all read from disk for preload=False
self._bad_dropped = True
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk"""
# Find the right file and offset to use
event_samp = self.events[idx, 0]
for raw in self._raw:
idx = np.where(raw.event_samps == event_samp)[0]
if len(idx) == 1:
idx = idx[0]
size = np.prod(raw.epoch_shape) * 4
offset = idx * size
break
        else:
            # no raw file matched this event sample; this should not happen
            raise RuntimeError('Correct epoch could not be found, please '
                               'contact mne-python developers')
# the following is equivalent to this, but faster:
#
# >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
# >>> data *= raw.cals[np.newaxis, :, :]
# >>> data = data[idx]
#
# Eventually this could be refactored in io/tag.py if other functions
# could make use of it
raw.fid.seek(raw.data_tag.pos + offset + 16, 0) # 16 = Tag header
data = np.fromstring(raw.fid.read(size), '>f4').astype(np.float64)
data.shape = raw.epoch_shape
data *= raw.cals
return data
def bootstrap(epochs, random_state=None):
"""Compute epochs selected by bootstrapping
Parameters
----------
epochs : Epochs instance
epochs data to be bootstrapped
random_state : None | int | np.random.RandomState
To specify the random generator state
Returns
-------
epochs : Epochs instance
The bootstrap samples
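    Examples
    --------
    A hedged sketch assuming preloaded ``epochs``:
    >>> from mne.epochs import bootstrap  # doctest: +SKIP
    >>> boot = bootstrap(epochs, random_state=0)  # doctest: +SKIP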
"""
if not epochs.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
rng = check_random_state(random_state)
epochs_bootstrap = epochs.copy()
n_events = len(epochs_bootstrap.events)
idx = rng.randint(0, n_events, n_events)
epochs_bootstrap = epochs_bootstrap[idx]
return epochs_bootstrap
def _check_merge_epochs(epochs_list):
"""Aux function"""
if len(set(tuple(epochs.event_id.items()) for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for event_id")
if len(set(epochs.tmin for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for tmin")
if len(set(epochs.tmax for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for tmax")
if len(set(epochs.baseline for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for baseline")
@verbose
def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
verbose=None):
"""Concatenate channels, info and data from two Epochs objects
Parameters
----------
epochs_list : list of Epochs
Epochs object to concatenate.
name : str
Comment that describes the Epochs data created.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless there is no
EEG in the data).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to True if any of the input epochs have verbose=True.
Returns
-------
epochs : instance of Epochs
Concatenated epochs.
"""
if not all(e.preload for e in epochs_list):
raise ValueError('All epochs must be preloaded.')
info = _merge_info([epochs.info for epochs in epochs_list])
data = [epochs.get_data() for epochs in epochs_list]
_check_merge_epochs(epochs_list)
for d in data:
if len(d) != len(data[0]):
raise ValueError('all epochs must be of the same length')
data = np.concatenate(data, axis=1)
if len(info['chs']) != data.shape[1]:
err = "Data shape does not match channel number in measurement info"
raise RuntimeError(err)
events = epochs_list[0].events.copy()
all_same = all(np.array_equal(events, epochs.events)
for epochs in epochs_list[1:])
if not all_same:
raise ValueError('Events must be the same.')
proj = any(e.proj for e in epochs_list) or add_eeg_ref
if verbose is None:
verbose = any(e.verbose for e in epochs_list)
epochs = epochs_list[0].copy()
epochs.info = info
epochs.picks = None
epochs.name = name
epochs.verbose = verbose
epochs.events = events
epochs.preload = True
epochs._bad_dropped = True
epochs._data = data
epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
activate=proj)
return epochs
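# A minimal usage sketch for ``add_channels_epochs``; ``epochs_meg`` and
# ``epochs_eeg`` are assumed to be preloaded Epochs objects with disjoint
# channel sets but identical events, times and baseline:
#
# >>> epochs_all = add_channels_epochs([epochs_meg, epochs_eeg])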
def _compare_epochs_infos(info1, info2, ind):
"""Compare infos"""
info1._check_consistency()
info2._check_consistency()
if info1['nchan'] != info2['nchan']:
raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind)
if info1['bads'] != info2['bads']:
raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind)
if info1['sfreq'] != info2['sfreq']:
raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind)
if set(info1['ch_names']) != set(info2['ch_names']):
raise ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind)
if len(info2['projs']) != len(info1['projs']):
raise ValueError('SSP projectors in epochs files must be the same')
if any(not _proj_equal(p1, p2) for p1, p2 in
zip(info2['projs'], info1['projs'])):
raise ValueError('SSP projectors in epochs files must be the same')
def _concatenate_epochs(epochs_list, with_data=True):
"""Auxiliary function for concatenating epochs."""
out = epochs_list[0]
data = [out.get_data()] if with_data else None
events = [out.events]
baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
info = deepcopy(out.info)
verbose = out.verbose
drop_log = deepcopy(out.drop_log)
event_id = deepcopy(out.event_id)
selection = out.selection
for ii, epochs in enumerate(epochs_list[1:]):
_compare_epochs_infos(epochs.info, info, ii)
if not np.array_equal(epochs.times, epochs_list[0].times):
raise ValueError('Epochs must have same times')
if epochs.baseline != baseline:
raise ValueError('Baseline must be same for all epochs')
if with_data:
data.append(epochs.get_data())
events.append(epochs.events)
selection = np.concatenate((selection, epochs.selection))
drop_log.extend(epochs.drop_log)
event_id.update(epochs.event_id)
events = np.concatenate(events, axis=0)
if with_data:
data = np.concatenate(data, axis=0)
return (info, data, events, event_id, tmin, tmax, baseline, selection,
drop_log, verbose)
def _finish_concat(info, data, events, event_id, tmin, tmax, baseline,
selection, drop_log, verbose):
"""Helper to finish concatenation for epochs not read from disk"""
events[:, 0] = np.arange(len(events)) # arbitrary after concat
selection = np.where([len(d) == 0 for d in drop_log])[0]
out = _BaseEpochs(info, data, events, event_id, tmin, tmax,
baseline=baseline, add_eeg_ref=False,
selection=selection, drop_log=drop_log,
proj=False, on_missing='ignore', verbose=verbose)
out.drop_bad()
return out
def concatenate_epochs(epochs_list):
"""Concatenate a list of epochs into one epochs object
Parameters
----------
epochs_list : list
list of Epochs instances to concatenate (in order).
Returns
-------
epochs : instance of Epochs
The result of the concatenation (first Epochs instance passed in).
Notes
-----
.. versionadded:: 0.9.0
"""
return _finish_concat(*_concatenate_epochs(epochs_list))
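# A minimal usage sketch for ``concatenate_epochs``; ``epochs_run1`` and
# ``epochs_run2`` are assumed to be Epochs objects with matching info, times
# and baseline (e.g. two runs of the same paradigm):
#
# >>> epochs_all = concatenate_epochs([epochs_run1, epochs_run2])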
@verbose
def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None,
origin='auto', weight_all=True, int_order=8, ext_order=3,
destination=None, ignore_ref=False, return_mapping=False,
pos=None, verbose=None):
"""Average data using Maxwell filtering, transforming using head positions
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
head_pos : array | tuple | None
The array should be of shape ``(N, 10)``, holding the position
parameters as returned by e.g. `read_head_pos`. For backward
        compatibility, this can also be a tuple of ``(trans, rot, t)``
as returned by `head_pos_to_trans_rot_t`.
orig_sfreq : float | None
The original sample frequency of the data (that matches the
event sample numbers in ``epochs.events``). Can be ``None``
if data have not been decimated or resampled.
picks : array-like of int | None
        If None, only MEG, EEG, SEEG, and ECoG channels are kept,
        otherwise the channel indices in picks are kept.
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in head
coords and in meters. The default is ``'auto'``, which means
a head-digitization-based origin fit.
weight_all : bool
If True, all channels are weighted by the SSS basis weights.
If False, only MEG channels are weighted, other channels
receive uniform weight per epoch.
int_order : int
Order of internal component of spherical expansion.
ext_order : int
Order of external component of spherical expansion.
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
.. versionadded:: 0.12
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
return_mapping : bool
If True, return the mapping matrix.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : instance of Evoked
The averaged epochs.
See Also
--------
mne.preprocessing.maxwell_filter
mne.chpi.read_head_pos
Notes
-----
The Maxwell filtering version of this algorithm is described in [1]_,
in section V.B "Virtual signals and movement correction", equations
40-44. For additional validation, see [2]_.
Regularization has not been added because in testing it appears to
decrease dipole localization accuracy relative to using all components.
Fine calibration and cross-talk cancellation, however, could be added
to this algorithm based on user demand.
.. versionadded:: 0.11
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
.. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements
of children in MEG: Quantification, effects on source
           estimation, and compensation." NeuroImage 40:541–550, 2008.
"""
from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads,
_check_usable, _col_norm_pinv,
_get_n_moments, _get_mf_picks,
_prep_mf_coils, _check_destination,
_remove_meg_projs)
if pos is not None:
head_pos = pos
warn('pos has been replaced by head_pos and will be removed in 0.13',
DeprecationWarning)
if head_pos is None:
raise TypeError('head_pos must be provided and cannot be None')
from .chpi import head_pos_to_trans_rot_t
if not isinstance(epochs, _BaseEpochs):
raise TypeError('epochs must be an instance of Epochs, not %s'
% (type(epochs),))
orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq
orig_sfreq = float(orig_sfreq)
if isinstance(head_pos, np.ndarray):
head_pos = head_pos_to_trans_rot_t(head_pos)
trn, rot, t = head_pos
del head_pos
_check_usable(epochs)
origin = _check_origin(origin, epochs.info, 'head')
recon_trans = _check_destination(destination, epochs.info, True)
logger.info('Aligning and averaging up to %s epochs'
% (len(epochs.events)))
if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])):
raise RuntimeError('Epochs must have monotonically increasing events')
meg_picks, _, _, good_picks, coil_scale, _ = \
_get_mf_picks(epochs.info, int_order, ext_order, ignore_ref)
n_channels, n_times = len(epochs.ch_names), len(epochs.times)
other_picks = np.setdiff1d(np.arange(n_channels), meg_picks)
data = np.zeros((n_channels, n_times))
count = 0
# keep only MEG w/bad channels marked in "info_from"
info_from = pick_info(epochs.info, good_picks, copy=True)
all_coils_recon = _prep_mf_coils(epochs.info, ignore_ref=ignore_ref)
all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref)
# remove MEG bads in "to" info
info_to = deepcopy(epochs.info)
_reset_meg_bads(info_to)
# set up variables
w_sum = 0.
n_in, n_out = _get_n_moments([int_order, ext_order])
S_decomp = 0. # this will end up being a weighted average
last_trans = None
decomp_coil_scale = coil_scale[good_picks]
exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
origin=origin)
for ei, epoch in enumerate(epochs):
event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq
use_idx = np.where(t <= event_time)[0]
if len(use_idx) == 0:
trans = epochs.info['dev_head_t']['trans']
else:
use_idx = use_idx[-1]
trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]),
[[0., 0., 0., 1.]]])
loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000))
if last_trans is None or not np.allclose(last_trans, trans):
logger.info(' Processing epoch %s (device location: %s mm)'
% (ei + 1, loc_str))
reuse = False
last_trans = trans
else:
logger.info(' Processing epoch %s (device location: same)'
% (ei + 1,))
reuse = True
epoch = epoch.copy() # because we operate inplace
if not reuse:
S = _trans_sss_basis(exp, all_coils, trans,
coil_scale=decomp_coil_scale)
# Get the weight from the un-regularized version
weight = np.sqrt(np.sum(S * S)) # frobenius norm (eq. 44)
# XXX Eventually we could do cross-talk and fine-cal here
S *= weight
S_decomp += S # eq. 41
epoch[slice(None) if weight_all else meg_picks] *= weight
data += epoch # eq. 42
w_sum += weight
count += 1
del info_from
mapping = None
if count == 0:
data.fill(np.nan)
else:
data[meg_picks] /= w_sum
data[other_picks] /= w_sum if weight_all else count
# Finalize weighted average decomp matrix
S_decomp /= w_sum
# Get recon matrix
# (We would need to include external here for regularization to work)
exp['ext_order'] = 0
S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans)
exp['ext_order'] = ext_order
# We could determine regularization on basis of destination basis
# matrix, restricted to good channels, as regularizing individual
# matrices within the loop above does not seem to work. But in
# testing this seemed to decrease localization quality in most cases,
# so we do not provide the option here.
S_recon /= coil_scale
# Invert
pS_ave = _col_norm_pinv(S_decomp)[0][:n_in]
pS_ave *= decomp_coil_scale.T
# Get mapping matrix
mapping = np.dot(S_recon, pS_ave)
# Apply mapping
data[meg_picks] = np.dot(mapping, data[good_picks])
info_to['dev_head_t'] = recon_trans # set the reconstruction transform
evoked = epochs._evoked_from_epoch_data(data, info_to, picks,
n_events=count, kind='average')
_remove_meg_projs(evoked) # remove MEG projectors, they won't apply now
logger.info('Created Evoked dataset from %s epochs' % (count,))
return (evoked, mapping) if return_mapping else evoked
| bsd-3-clause | 669,946,045,660,823,700 | 40.938557 | 80 | 0.57077 | false |
twotwo/tools-python | pandas-sample/save-stock-info.py | 1 | 1444 | # fetch remote data to local excel: AAPL.xls/MSFT.xls
# https://github.com/pydata/pandas-datareader/blob/master/pandas_datareader/data.py
import datetime
import os
import pandas as pd
import pandas_datareader.data as web
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
warnings.filterwarnings("ignore", category=FutureWarning)
print('use export PYTHONWARNINGS="ignore" to disable warning')
start = datetime.datetime(2018, 1, 1)
end = datetime.date.today()
if os.path.exists('data/AAPL.xls'):
print('data/AAPL.xls exist')
else:
apple = web.DataReader("AAPL", "yahoo", start, end)
# pandas.core.frame.DataFrame
print(f"type(apple)={type(apple)}")
stocks = ['AAPL', "GOOG", 'MSFT']
for stock in stocks:
if os.path.exists(f'./data/{stock}.xls'):
print(f'./data/{stock}.xls exist')
continue
# save to excel
print(f"saving {stock}.xls ...")
web.DataReader(stock, 'yahoo', start, end).to_excel(
f'./data/{stock}.xls')
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html
# index_col: int, default None. Column (0-indexed) to use as the row labels.
apple = pd.read_excel("./data/AAPL.xls", index_col=0)
ms = pd.read_excel("./data/MSFT.xls", index_col=0)
print(f"\n=== head of stock ===\n{apple.head()}\n")
print(f"\n=== index of stock ===\n{apple.index}\n")
print(f"=== apple.describe ===\n{apple.describe()}")
| mit | 107,123,274,152,641,920 | 32.581395 | 83 | 0.680748 | false |
twbattaglia/conda-menu | app/cmd/conda_api.py | 1 | 16815 | import re
import os
import sys
import json
from subprocess import Popen, PIPE
from os.path import basename, isdir, join
__version__ = '1.2.1-modified'
class CondaError(Exception):
"General Conda error"
pass
class CondaEnvExistsError(CondaError):
"Conda environment already exists"
pass
def _call_conda(extra_args, abspath=True):
# call conda with the list of extra arguments, and return the tuple
# stdout, stderr
if abspath:
if sys.platform == 'win32':
python = join(ROOT_PREFIX, 'python.exe')
conda = join(ROOT_PREFIX, 'Scripts', 'conda-script.py')
else:
python = join(ROOT_PREFIX, 'bin/python')
conda = join(ROOT_PREFIX, 'bin/conda')
cmd_list = [python, conda]
else: # just use whatever conda is on the path
cmd_list = ['conda']
cmd_list.extend(extra_args)
try:
p = Popen(cmd_list, stdout=PIPE, stderr=PIPE)
except OSError:
        raise Exception("could not invoke %r\n" % cmd_list)
return p.communicate()
def _call_and_parse(extra_args, abspath=True):
stdout, stderr = _call_conda(extra_args, abspath=abspath)
if stderr.decode().strip():
raise Exception('conda %r:\nSTDERR:\n%s\nEND' % (extra_args,
stderr.decode()))
return json.loads(stdout.decode())
def _setup_install_commands_from_kwargs(kwargs, keys=tuple()):
cmd_list = []
if kwargs.get('override_channels', False) and 'channel' not in kwargs:
raise TypeError('conda search: override_channels requires channel')
if 'env' in kwargs:
cmd_list.extend(['--name', kwargs.pop('env')])
if 'prefix' in kwargs:
cmd_list.extend(['--prefix', kwargs.pop('prefix')])
if 'channel' in kwargs:
channel = kwargs.pop('channel')
if isinstance(channel, str):
cmd_list.extend(['--channel', channel])
else:
cmd_list.append('--channel')
cmd_list.extend(channel)
for key in keys:
if key in kwargs and kwargs[key]:
cmd_list.append('--' + key.replace('_', '-'))
return cmd_list
def set_root_prefix(prefix=None):
"""
Set the prefix to the root environment (default is /opt/anaconda).
This function should only be called once (right after importing conda_api).
"""
global ROOT_PREFIX
if prefix:
ROOT_PREFIX = prefix
else:
# find some conda instance, and then use info to get 'root_prefix'
info = _call_and_parse(['info', '--json'], abspath=False)
ROOT_PREFIX = info['root_prefix']
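# A minimal usage sketch; the prefix below is only an illustrative assumption
# (any existing Anaconda/Miniconda root prefix works):
#
# >>> set_root_prefix('/opt/anaconda')
# >>> get_envs()          # absolute prefixes of the named environments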
def get_conda_version():
"""
return the version of conda being used (invoked) as a string
"""
pat = re.compile(r'conda:?\s+(\d+\.\d\S+|unknown)')
stdout, stderr = _call_conda(['--version'])
# argparse outputs version to stderr in Python < 3.4.
# http://bugs.python.org/issue18920
m = pat.match(stderr.decode().strip())
if m is None:
m = pat.match(stdout.decode().strip())
if m is None:
raise Exception('output did not match: %r' % stderr)
return m.group(1)
def get_envs():
"""
Return all of the (named) environment (this does not include the root
environment), as a list of absolute path to their prefixes.
"""
info = _call_and_parse(['info', '--json'])
return info['envs']
def get_prefix_envname(name):
"""
Given the name of an environment return its full prefix path, or None
if it cannot be found.
"""
if name == 'root':
return ROOT_PREFIX
for prefix in get_envs():
if basename(prefix) == name:
return prefix
return None
def linked(prefix):
"""
Return the (set of canonical names) of linked packages in `prefix`.
"""
if not isdir(prefix):
raise Exception('no such directory: %r' % prefix)
meta_dir = join(prefix, 'conda-meta')
if not isdir(meta_dir):
# we might have nothing in linked (and no conda-meta directory)
return set()
return set(fn[:-5] for fn in os.listdir(meta_dir) if fn.endswith('.json'))
def split_canonical_name(cname):
"""
Split a canonical package name into (name, version, build) strings.
"""
return tuple(cname.rsplit('-', 2))
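# Example of the split (the canonical name is chosen only for illustration):
#
# >>> split_canonical_name('numpy-1.11.0-py27_0')
# ('numpy', '1.11.0', 'py27_0')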
def info(abspath=True):
"""
Return a dictionary with configuration information.
No guarantee is made about which keys exist. Therefore this function
should only be used for testing and debugging.
"""
return _call_and_parse(['info', '--json'], abspath=abspath)
def package_info(package, abspath=True):
"""
Return a dictionary with package information.
"""
return _call_and_parse(['info', package, '--json'], abspath=abspath)
def search(regex=None, spec=None, **kwargs):
"""
Search for packages.
"""
cmd_list = ['search', '--json']
if regex and spec:
raise TypeError('conda search: only one of regex or spec allowed')
if regex:
cmd_list.append(regex)
if spec:
cmd_list.extend(['--spec', spec])
if 'platform' in kwargs:
cmd_list.extend(['--platform', kwargs.pop('platform')])
cmd_list.extend(
_setup_install_commands_from_kwargs(
kwargs,
('canonical', 'unknown', 'use_index_cache', 'outdated',
'override_channels')))
return _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
def create(name=None, prefix=None, pkgs=None):
"""
Create an environment either by name or path with a specified set of
packages
"""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to '
'install into new environment')
cmd_list = ['create', '--yes', '--quiet']
if name:
ref = name
search = [os.path.join(d, name) for d in info()['envs_dirs']]
cmd_list = ['create', '--yes', '--quiet', '--name', name]
elif prefix:
ref = prefix
search = [prefix]
cmd_list = ['create', '--yes', '--quiet', '--prefix', prefix]
else:
raise TypeError('must specify either an environment name or a path '
'for new environment')
if any(os.path.exists(prefix) for prefix in search):
raise CondaEnvExistsError('Conda environment [%s] already exists' % ref)
cmd_list.extend(pkgs)
(out, err) = _call_conda(cmd_list)
if err.decode().strip():
raise CondaError('conda %s: %s' % (" ".join(cmd_list), err.decode()))
return out
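# A minimal usage sketch; the environment name and package specs are
# illustrative assumptions:
#
# >>> create(name='analysis', pkgs=['python=2.7', 'numpy'])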
def create_yml(yml=None):
"""
Create new env from YAML file
"""
if not yml:
raise TypeError('Must specify a yaml file location')
cmd_list = ['env', 'create', '--file', yml]
(out, err) = _call_conda(cmd_list)
return out
def export_yml(name=None, yml=None):
"""
Export conda env to yaml file
"""
if not yml or not name:
raise TypeError('Must specify a yaml file location and name')
cmd_list = ['env', 'export', '--file', yml, '--name', name]
(out, err) = _call_conda(cmd_list)
return out
def install(name=None, prefix=None, pkgs=None):
"""
Install packages into an environment either by name or path with a
specified set of packages
"""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to '
'install into existing environment')
cmd_list = ['install', '--yes', '--quiet']
if name:
cmd_list.extend(['--name', name])
elif prefix:
cmd_list.extend(['--prefix', prefix])
else: # just install into the current environment, whatever that is
pass
cmd_list.extend(pkgs)
(out, err) = _call_conda(cmd_list)
if err.decode().strip():
raise CondaError('conda %s: %s' % (" ".join(cmd_list), err.decode()))
return out
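# A minimal usage sketch; the environment name, prefix and package specs are
# illustrative assumptions:
#
# >>> install(name='analysis', pkgs=['scipy'])
# >>> install(prefix='/opt/envs/analysis', pkgs=['pandas'])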
def update(*pkgs, **kwargs):
"""
Update package(s) (in an environment) by name.
"""
cmd_list = ['update', '--json', '--quiet', '--yes']
if not pkgs and not kwargs.get('all'):
raise TypeError("Must specify at least one package to update, or all=True.")
cmd_list.extend(
_setup_install_commands_from_kwargs(
kwargs,
('dry_run', 'no_deps', 'override_channels',
'no_pin', 'force', 'all', 'use_index_cache', 'use_local',
'alt_hint')))
cmd_list.extend(pkgs)
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result
def remove(*pkgs, **kwargs):
"""
Remove a package (from an environment) by name.
Returns {
success: bool, (this is always true),
(other information)
}
"""
cmd_list = ['remove', '--json', '--quiet', '--yes']
if not pkgs and not kwargs.get('all'):
raise TypeError("Must specify at least one package to remove, or all=True.")
if kwargs.get('name') and kwargs.get('path'):
raise TypeError('conda remove: At most one of name, path allowed')
if kwargs.get('name'):
cmd_list.extend(['--name', kwargs.pop('name')])
if kwargs.get('path'):
cmd_list.extend(['--prefix', kwargs.pop('path')])
cmd_list.extend(
_setup_install_commands_from_kwargs(
kwargs,
('dry_run', 'features', 'override_channels',
'no_pin', 'force', 'all')))
cmd_list.extend(pkgs)
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result
def remove_environment(name=None, path=None, **kwargs):
"""
Remove an environment entirely.
See ``remove``.
"""
return remove(name=name, path=path, all=True, **kwargs)
def clone_environment(clone, name=None, path=None, **kwargs):
"""
Clone the environment ``clone`` into ``name`` or ``path``.
"""
cmd_list = ['create', '--json', '--quiet']
if (name and path) or not (name or path):
raise TypeError("conda clone_environment: exactly one of name or path required")
if name:
cmd_list.extend(['--name', name])
if path:
cmd_list.extend(['--prefix', path])
cmd_list.extend(['--clone', clone])
cmd_list.extend(
_setup_install_commands_from_kwargs(
kwargs,
('dry_run', 'unknown', 'use_index_cache', 'use_local', 'no_pin',
'force', 'all', 'channel', 'override_channels', 'no_default_packages')))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result
def process(name=None, prefix=None, cmd=None, args=None,
stdin=None, stdout=None, stderr=None, timeout=None):
"""
Create a Popen process for cmd using the specified args but in the conda
environment specified by name or prefix.
The returned object will need to be invoked with p.communicate() or similar.
"""
if bool(name) == bool(prefix):
raise TypeError('exactly one of name or prefix must be specified')
if not cmd:
raise TypeError('cmd to execute must be specified')
if not args:
args = []
if name:
prefix = get_prefix_envname(name)
conda_env = dict(os.environ)
if sys.platform == 'win32':
conda_env['PATH'] = join(prefix, 'Scripts') + os.pathsep + conda_env['PATH']
else: # Unix
conda_env['PATH'] = join(prefix, 'bin') + os.pathsep + conda_env['PATH']
conda_env['PATH'] = prefix + os.pathsep + conda_env['PATH']
cmd_list = [cmd]
cmd_list.extend(args)
try:
p = Popen(cmd_list, env=conda_env, stdin=stdin, stdout=stdout, stderr=stderr)
except OSError:
raise Exception("could not invoke %r\n" % cmd_list)
return p
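# A minimal usage sketch; the environment name and command are illustrative
# assumptions:
#
# >>> from subprocess import PIPE
# >>> p = process(name='analysis', cmd='python', args=['--version'],
# ...             stdout=PIPE, stderr=PIPE)
# >>> out, err = p.communicate()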
def _setup_config_from_kwargs(kwargs):
cmd_list = ['--json', '--force']
if 'file' in kwargs:
cmd_list.extend(['--file', kwargs['file']])
if 'system' in kwargs:
cmd_list.append('--system')
return cmd_list
def config_path(**kwargs):
"""
Get the path to the config file.
"""
cmd_list = ['config', '--get']
cmd_list.extend(_setup_config_from_kwargs(kwargs))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result['rc_path']
def config_get(*keys, **kwargs):
"""
Get the values of configuration keys.
Returns a dictionary of values. Note, the key may not be in the
dictionary if the key wasn't set in the configuration file.
"""
cmd_list = ['config', '--get']
cmd_list.extend(keys)
cmd_list.extend(_setup_config_from_kwargs(kwargs))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result['get']
def config_set(key, value, **kwargs):
"""
Set a key to a (bool) value.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--set', key, str(value)]
cmd_list.extend(_setup_config_from_kwargs(kwargs))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result.get('warnings', [])
def config_add(key, value, **kwargs):
"""
Add a value to a key.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--add', key, value]
cmd_list.extend(_setup_config_from_kwargs(kwargs))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result.get('warnings', [])
def config_remove(key, value, **kwargs):
"""
Remove a value from a key.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--remove', key, value]
cmd_list.extend(_setup_config_from_kwargs(kwargs))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result.get('warnings', [])
def config_delete(key, **kwargs):
"""
Remove a key entirely.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--remove-key', key]
cmd_list.extend(_setup_config_from_kwargs(kwargs))
result = _call_and_parse(cmd_list, abspath=kwargs.get('abspath', True))
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result.get('warnings', [])
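# A minimal usage sketch for the config helpers; the key and channel values
# are illustrative assumptions:
#
# >>> config_set('always_yes', True)
# >>> config_add('channels', 'conda-forge')
# >>> config_get('channels')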
def run(command, abspath=True):
"""
Launch the specified app by name or full package name.
Returns a dictionary containing the key "fn", whose value is the full
package (ending in ``.tar.bz2``) of the app.
"""
cmd_list = ['run', '--json', command]
result = _call_and_parse(cmd_list, abspath=abspath)
if 'error' in result:
raise CondaError('conda %s: %s' % (" ".join(cmd_list), result['error']))
return result
def test():
"""
Self-test function, which prints useful debug information.
This function returns None on success, and will crash the interpreter
on failure.
"""
print('sys.version: %r' % sys.version)
print('sys.prefix : %r' % sys.prefix)
print('conda_api.__version__: %r' % __version__)
print('conda_api.ROOT_PREFIX: %r' % ROOT_PREFIX)
if isdir(ROOT_PREFIX):
conda_version = get_conda_version()
print('conda version: %r' % conda_version)
print('conda info:')
d = info()
for kv in d.items():
print('\t%s=%r' % kv)
assert d['conda_version'] == conda_version
else:
print('Warning: no such directory: %r' % ROOT_PREFIX)
print('OK')
if __name__ == '__main__':
from optparse import OptionParser
p = OptionParser(usage="usage: %prog [options] [ROOT_PREFIX]",
description="self-test conda-api")
opts, args = p.parse_args()
if len(args) == 0:
set_root_prefix()
elif len(args) == 1:
set_root_prefix(args[0])
else:
p.error('did not expect more than one argument, try -h')
test()
| gpl-3.0 | 8,817,906,200,453,926,000 | 28.294425 | 88 | 0.595897 | false |
Andy43000/lingua101 | german_adverbs.py | 1 | 1520 |
# top 25 German adverbs
import random
while True: #initiate loop
d = { 'eben ':'just now',
'erst ':'first',
'natürlich ': 'naturally',
'vielleicht ':'perhaps',
'dort ': 'there',
'auch ':'also',
'so ':'so',
'dann ':'then',
'da ':'there',
'noch ': 'still, yet',
'also ':'so',
'nur ' :'only',
'schon ':'already',
'mehr ':'more',
'jetzt ' :'now',
'immer ' :'always',
'sehr ':'very',
'hier ' :'here',
'doch ' :'but, still',
'wieder ':'again',
'eigentlich ': 'actually',
'oben ': 'above',
'nun,nu ': 'now',
'heute ' : 'today',
'weit ' : 'widely, far'}
question = random.choice(list(d.keys())) #get random key
user = input(question) #pose question with key and get answer
# print(question)
#d.values
answer = d.get(question) #use the key to get its corresponding value.
print(answer)
user_blank = user.strip() #remove blank spaces
    if user_blank == 'exit':
        break
    if user_blank == answer: #return verdict
        print('Correct!')
    else:
        print('Incorrect!')
| apache-2.0 | -4,658,886,380,276,498,000 | 26.618182 | 77 | 0.399605 | false |
abusse/cinder | cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py | 1 | 18776 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
HP LeftHand SAN ISCSI Driver.
The driver communicates to the backend aka Cliq via SSH to perform all the
operations on the SAN.
"""
from lxml import etree
from oslo_concurrency import processutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.san import SanISCSIDriver
LOG = logging.getLogger(__name__)
class HPLeftHandCLIQProxy(SanISCSIDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
We use the CLIQ interface, over SSH.
Rough overview of CLIQ commands used:
:createVolume: (creates the volume)
:deleteVolume: (deletes the volume)
:modifyVolume: (extends the volume)
:createSnapshot: (creates the snapshot)
:deleteSnapshot: (deletes the snapshot)
:cloneSnapshot: (creates the volume from a snapshot)
:getVolumeInfo: (to discover the IQN etc)
:getSnapshotInfo: (to discover the IQN etc)
:getClusterInfo: (to discover the iSCSI target IP address)
The 'trick' here is that the HP SAN enforces security by default, so
normally a volume mount would need both to configure the SAN in the volume
layer and do the mount on the compute layer. Multi-layer operations are
not catered for at the moment in the cinder architecture, so instead we
share the volume using CHAP at volume creation time. Then the mount need
only use those CHAP credentials, so can take place exclusively in the
compute layer.
Version history:
1.0.0 - Initial driver
1.1.0 - Added create/delete snapshot, extend volume, create volume
from snapshot support.
1.2.0 - Ported into the new HP LeftHand driver.
1.2.1 - Fixed bug #1279897, HP LeftHand CLIQ proxy may return incorrect
capacity values.
1.2.2 - Fixed driver with Paramiko 1.13.0, bug #1298608.
"""
VERSION = "1.2.2"
device_stats = {}
def __init__(self, *args, **kwargs):
super(HPLeftHandCLIQProxy, self).__init__(*args, **kwargs)
self.cluster_vip = None
def do_setup(self, context):
pass
def check_for_setup_error(self):
pass
def get_version_string(self):
return (_('CLIQ %(proxy_ver)s') % {'proxy_ver': self.VERSION})
def _cliq_run(self, verb, cliq_args, check_exit_code=True):
"""Runs a CLIQ command over SSH, without doing any result parsing."""
cmd_list = [verb]
for k, v in cliq_args.items():
cmd_list.append("%s=%s" % (k, v))
return self._run_ssh(cmd_list, check_exit_code)
def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
"""Runs a CLIQ command over SSH, parsing and checking the output."""
cliq_args['output'] = 'XML'
(out, _err) = self._cliq_run(verb, cliq_args, check_cliq_result)
LOG.debug("CLIQ command returned %s", out)
result_xml = etree.fromstring(out.encode('utf8'))
if check_cliq_result:
response_node = result_xml.find("response")
if response_node is None:
msg = (_("Malformed response to CLIQ command "
"%(verb)s %(cliq_args)s. Result=%(out)s") %
{'verb': verb, 'cliq_args': cliq_args, 'out': out})
raise exception.VolumeBackendAPIException(data=msg)
result_code = response_node.attrib.get("result")
if result_code != "0":
msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
" Result=%(out)s") %
{'verb': verb, 'cliq_args': cliq_args, 'out': out})
raise exception.VolumeBackendAPIException(data=msg)
return result_xml
def _cliq_get_cluster_info(self, cluster_name):
"""Queries for info about the cluster (including IP)."""
cliq_args = {}
cliq_args['clusterName'] = cluster_name
cliq_args['searchDepth'] = '1'
cliq_args['verbose'] = '0'
result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
return result_xml
def _cliq_get_cluster_vip(self, cluster_name):
"""Gets the IP on which a cluster shares iSCSI volumes."""
cluster_xml = self._cliq_get_cluster_info(cluster_name)
vips = []
for vip in cluster_xml.findall("response/cluster/vip"):
vips.append(vip.attrib.get('ipAddress'))
if len(vips) == 1:
return vips[0]
_xml = etree.tostring(cluster_xml)
msg = (_("Unexpected number of virtual ips for cluster "
" %(cluster_name)s. Result=%(_xml)s") %
{'cluster_name': cluster_name, '_xml': _xml})
raise exception.VolumeBackendAPIException(data=msg)
def _cliq_get_volume_info(self, volume_name):
"""Gets the volume info, including IQN."""
cliq_args = {}
cliq_args['volumeName'] = volume_name
result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
# Result looks like this:
# <gauche version="1.0">
# <response description="Operation succeeded." name="CliqSuccess"
# processingTime="87" result="0">
# <volume autogrowPages="4" availability="online" blockSize="1024"
# bytesWritten="0" checkSum="false" clusterName="Cluster01"
# created="2011-02-08T19:56:53Z" deleting="false" description=""
# groupName="Group01" initialQuota="536870912" isPrimary="true"
# iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
# maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
# minReplication="1" name="vol-b" parity="0" replication="2"
# reserveQuota="536870912" scratchQuota="4194304"
# serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
# size="1073741824" stridePages="32" thinProvision="true">
# <status description="OK" value="2"/>
# <permission access="rw"
# authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
# chapName="chapusername" chapRequired="true" id="25369"
# initiatorSecret="" iqn="" iscsiEnabled="true"
# loadBalance="true" targetSecret="supersecret"/>
# </volume>
# </response>
# </gauche>
# Flatten the nodes into a dictionary; use prefixes to avoid collisions
volume_attributes = {}
volume_node = result_xml.find("response/volume")
for k, v in volume_node.attrib.items():
volume_attributes["volume." + k] = v
status_node = volume_node.find("status")
if status_node is not None:
for k, v in status_node.attrib.items():
volume_attributes["status." + k] = v
# We only consider the first permission node
permission_node = volume_node.find("permission")
if permission_node is not None:
            for k, v in permission_node.attrib.items():
volume_attributes["permission." + k] = v
LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s" %
{'volume_name': volume_name,
'volume_attributes': volume_attributes})
return volume_attributes
def _cliq_get_snapshot_info(self, snapshot_name):
"""Gets the snapshot info, including IQN."""
cliq_args = {}
cliq_args['snapshotName'] = snapshot_name
result_xml = self._cliq_run_xml("getSnapshotInfo", cliq_args)
# Result looks like this:
# <gauche version="1.0">
# <response description="Operation succeeded." name="CliqSuccess"
# processingTime="87" result="0">
# <snapshot applicationManaged="false" autogrowPages="32768"
# automatic="false" availability="online" bytesWritten="0"
# clusterName="CloudCluster1" created="2013-08-26T07:03:44Z"
# deleting="false" description="" groupName="CloudMgmtGroup1"
# id="730" initialQuota="536870912" isPrimary="true"
# iscsiIqn="iqn.2003-10.com.lefthandnetworks:cloudmgmtgroup1:73"
# md5="a64b4f850539c07fb5ce3cee5db1fcce" minReplication="1"
# name="snapshot-7849288e-e5e8-42cb-9687-9af5355d674b"
# replication="2" reserveQuota="536870912" scheduleId="0"
# scratchQuota="4194304" scratchWritten="0"
# serialNumber="a64b4f850539c07fb5ce3cee5db1fcce00000000000002da"
# size="2147483648" stridePages="32"
# volumeSerial="a64b4f850539c07fb5ce3cee5db1fcce00000000000002d">
# <status description="OK" value="2"/>
# <permission access="rw"
# authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
# chapName="chapusername" chapRequired="true" id="25369"
# initiatorSecret="" iqn="" iscsiEnabled="true"
# loadBalance="true" targetSecret="supersecret"/>
# </snapshot>
# </response>
# </gauche>
# Flatten the nodes into a dictionary; use prefixes to avoid collisions
snapshot_attributes = {}
snapshot_node = result_xml.find("response/snapshot")
for k, v in snapshot_node.attrib.items():
snapshot_attributes["snapshot." + k] = v
status_node = snapshot_node.find("status")
if status_node is not None:
for k, v in status_node.attrib.items():
snapshot_attributes["status." + k] = v
# We only consider the first permission node
permission_node = snapshot_node.find("permission")
if permission_node is not None:
            for k, v in permission_node.attrib.items():
snapshot_attributes["permission." + k] = v
LOG.debug("Snapshot info: %(name)s => %(attributes)s" %
{'name': snapshot_name, 'attributes': snapshot_attributes})
return snapshot_attributes
def create_volume(self, volume):
"""Creates a volume."""
cliq_args = {}
cliq_args['clusterName'] = self.configuration.san_clustername
if self.configuration.san_thin_provision:
cliq_args['thinProvision'] = '1'
else:
cliq_args['thinProvision'] = '0'
cliq_args['volumeName'] = volume['name']
cliq_args['size'] = '%sGB' % volume['size']
self._cliq_run_xml("createVolume", cliq_args)
return self._get_model_update(volume['name'])
def extend_volume(self, volume, new_size):
"""Extend the size of an existing volume."""
cliq_args = {}
cliq_args['volumeName'] = volume['name']
cliq_args['size'] = '%sGB' % new_size
self._cliq_run_xml("modifyVolume", cliq_args)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
cliq_args = {}
cliq_args['snapshotName'] = snapshot['name']
cliq_args['volumeName'] = volume['name']
self._cliq_run_xml("cloneSnapshot", cliq_args)
return self._get_model_update(volume['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
cliq_args = {}
cliq_args['snapshotName'] = snapshot['name']
cliq_args['volumeName'] = snapshot['volume_name']
cliq_args['inheritAccess'] = 1
self._cliq_run_xml("createSnapshot", cliq_args)
def delete_volume(self, volume):
"""Deletes a volume."""
cliq_args = {}
cliq_args['volumeName'] = volume['name']
cliq_args['prompt'] = 'false' # Don't confirm
try:
self._cliq_get_volume_info(volume['name'])
except processutils.ProcessExecutionError:
LOG.error(_LE("Volume did not exist. It will not be deleted"))
return
self._cliq_run_xml("deleteVolume", cliq_args)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
cliq_args = {}
cliq_args['snapshotName'] = snapshot['name']
cliq_args['prompt'] = 'false' # Don't confirm
try:
self._cliq_get_snapshot_info(snapshot['name'])
except processutils.ProcessExecutionError:
LOG.error(_LE("Snapshot did not exist. It will not be deleted"))
return
try:
self._cliq_run_xml("deleteSnapshot", cliq_args)
except Exception as ex:
in_use_msg = 'cannot be deleted because it is a clone point'
if in_use_msg in ex.message:
raise exception.SnapshotIsBusy(ex)
raise exception.VolumeBackendAPIException(ex)
def local_path(self, volume):
msg = _("local_path not supported")
raise exception.VolumeBackendAPIException(data=msg)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host. HP VSA requires a volume to be assigned
to a server.
This driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value:
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                'target_portal': '127.0.0.1:3260',
'volume_id': 1,
}
}
"""
self._create_server(connector)
cliq_args = {}
cliq_args['volumeName'] = volume['name']
cliq_args['serverName'] = connector['host']
self._cliq_run_xml("assignVolumeToServer", cliq_args)
iscsi_data = self._get_iscsi_properties(volume)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_data
}
def _create_server(self, connector):
cliq_args = {}
cliq_args['serverName'] = connector['host']
out = self._cliq_run_xml("getServerInfo", cliq_args, False)
response = out.find("response")
result = response.attrib.get("result")
if result != '0':
cliq_args = {}
cliq_args['serverName'] = connector['host']
cliq_args['initiator'] = connector['initiator']
self._cliq_run_xml("createServer", cliq_args)
def _get_model_update(self, volume_name):
volume_info = self._cliq_get_volume_info(volume_name)
cluster_name = volume_info['volume.clusterName']
iscsi_iqn = volume_info['volume.iscsiIqn']
# TODO(justinsb): Is this always 1? Does it matter?
cluster_interface = '1'
if not self.cluster_vip:
self.cluster_vip = self._cliq_get_cluster_vip(cluster_name)
iscsi_portal = self.cluster_vip + ":3260," + cluster_interface
model_update = {}
# NOTE(jdg): LH volumes always at lun 0 ?
model_update['provider_location'] = ("%s %s %s" %
(iscsi_portal,
iscsi_iqn,
0))
return model_update
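    # _get_model_update() above builds provider_location in the form
    # "<cluster VIP>:3260,<interface> <iSCSI IQN> <lun>", for example (the VIP
    # address is an assumption, the IQN is taken from the sample XML above and
    # the LUN is always 0 here):
    #
    #   '10.10.22.7:3260,1 iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b 0'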
def terminate_connection(self, volume, connector, **kwargs):
"""Unassign the volume from the host."""
cliq_args = {}
cliq_args['volumeName'] = volume['name']
cliq_args['serverName'] = connector['host']
self._cliq_run_xml("unassignVolumeToServer", cliq_args)
def get_volume_stats(self, refresh=False):
if refresh:
self._update_backend_status()
return self.device_stats
def _update_backend_status(self):
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['reserved_percentage'] = 0
data['storage_protocol'] = 'iSCSI'
data['vendor_name'] = 'Hewlett-Packard'
result_xml = self._cliq_run_xml(
"getClusterInfo", {
'searchDepth': 1,
'clusterName': self.configuration.san_clustername})
cluster_node = result_xml.find("response/cluster")
total_capacity = cluster_node.attrib.get("spaceTotal")
free_capacity = cluster_node.attrib.get("unprovisionedSpace")
GB = units.Gi
data['total_capacity_gb'] = int(total_capacity) / GB
data['free_capacity_gb'] = int(free_capacity) / GB
self.device_stats = data
def create_cloned_volume(self, volume, src_vref):
raise NotImplementedError()
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
        :param context: Context
        :param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
"""
return False
def migrate_volume(self, ctxt, volume, host):
"""Migrate the volume to the specified host.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
return (False, None)
| apache-2.0 | -1,048,060,943,674,934,800 | 38.035343 | 79 | 0.597199 | false |
whowutwut/confluent | confluent_server/confluent/neighutil.py | 1 | 2087 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A consolidated manage of neighbor table information management.
# Ultimately, this should use AF_NETLINK, but in the interest of time,
# use ip neigh for the moment
import eventlet.green.subprocess as subprocess
import os
neightable = {}
neightime = 0
import re
_validmac = re.compile('..:..:..:..:..:..')
def update_neigh():
global neightable
global neightime
neightable = {}
if os.name == 'nt':
return
ipn = subprocess.Popen(['ip', 'neigh'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(neighdata, err) = ipn.communicate()
for entry in neighdata.split('\n'):
entry = entry.split(' ')
if len(entry) < 5 or not entry[4]:
continue
if entry[0] in ('192.168.0.100', '192.168.70.100', '192.168.70.125'):
# Note that these addresses are common static ip addresses
# that are hopelessly ambiguous if there are many
# so ignore such entries and move on
# ideally the system network steers clear of this landmine of
# a subnet, but just in case
continue
if not _validmac.match(entry[4]):
continue
neightable[entry[0]] = entry[4]
neightime = os.times()[4]
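# A typical `ip neigh` output line and the table entry parsed from it (the
# addresses are illustrative assumptions):
#
#   192.168.1.23 dev eth0 lladdr 00:11:22:33:44:55 REACHABLE
#   -> neightable['192.168.1.23'] = '00:11:22:33:44:55'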
def refresh_neigh():
global neightime
if os.name == 'nt':
return
if os.times()[4] > (neightime + 30):
update_neigh()
| apache-2.0 | -2,022,119,195,281,933,000 | 31.609375 | 77 | 0.637278 | false |
DedMemez/ODS-August-2017 | building/DistributedPetshopInterior.py | 1 | 1383 | # toontown.building.DistributedPetshopInterior
from direct.distributed.DistributedObject import DistributedObject
from direct.actor.Actor import Actor
from RandomBuilding import RandomBuilding
class DistributedPetshopInterior(DistributedObject, RandomBuilding):
def announceGenerate(self):
DistributedObject.announceGenerate(self)
self.setup()
def setup(self):
randomGen = self.getRandomGen()
colors = self.getColors()
self.interior = loader.loadModel('phase_4/models/modules/PetShopInterior')
self.interior.reparentTo(render)
self.fish = Actor('phase_4/models/props/interiorfish-zero', {'swim': 'phase_4/models/props/interiorfish-swim'})
self.fish.reparentTo(self.interior)
self.fish.setColorScale(0.8, 0.9, 1, 0.8)
self.fish.setScale(0.8)
self.fish.setPos(0, 6, -4)
self.fish.setPlayRate(0.7, 'swim')
self.fish.loop('swim')
if settings['smoothAnimations']:
self.fish.setBlend(frameBlend=True)
self.setupDoor(randomGen, colors, self.interior, -0.25)
self.resetNPCs()
def disable(self):
self.fish.cleanup()
del self.fish
self.interior.removeNode()
del self.interior
DistributedObject.disable(self) | apache-2.0 | 8,962,982,952,427,555,000 | 38.735294 | 119 | 0.670282 | false |
popazerty/openhdf-enigma2 | lib/python/Components/Network.py | 1 | 23887 | import re
import os
from socket import *
from Components.Console import Console
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from boxbranding import getBoxType
class Network:
def __init__(self):
self.ifaces = {}
self.configuredNetworkAdapters = []
self.NetworkState = 0
self.DnsState = 0
self.nameservers = []
self.ethtool_bin = "ethtool"
self.Console = Console()
self.LinkConsole = Console()
self.restartConsole = Console()
self.deactivateInterfaceConsole = Console()
self.activateInterfaceConsole = Console()
self.resetNetworkConsole = Console()
self.DnsConsole = Console()
self.PingConsole = Console()
self.config_ready = None
self.friendlyNames = {}
self.lan_interfaces = []
self.wlan_interfaces = []
self.remoteRootFS = None
self.getInterfaces()
def onRemoteRootFS(self):
if self.remoteRootFS is None:
import Harddisk
for parts in Harddisk.getProcMounts():
if parts[1] == '/' and parts[2] == 'nfs':
self.remoteRootFS = True
break
else:
self.remoteRootFS = False
return self.remoteRootFS
def isBlacklisted(self, iface):
return iface in ('lo', 'wifi0', 'wmaster0', 'sit0', 'tun0', 'tap0')
def getInterfaces(self, callback = None):
self.configuredInterfaces = []
for device in self.getInstalledAdapters():
self.getAddrInet(device, callback)
# helper function
def regExpMatch(self, pattern, string):
if string is None:
return None
try:
return pattern.search(string).group()
except AttributeError:
return None
	# helper function to convert ips from a string to a list of ints
def convertIP(self, ip):
return [ int(n) for n in ip.split('.') ]
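	# Example of the conversion (the address is chosen only for illustration):
	# self.convertIP('192.168.1.10') -> [192, 168, 1, 10]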
def getAddrInet(self, iface, callback):
if not self.Console:
self.Console = Console()
cmd = "ip -o addr show dev " + iface
self.Console.ePopen(cmd, self.IPaddrFinished, [iface,callback])
def IPaddrFinished(self, result, retval, extra_args):
(iface, callback ) = extra_args
data = { 'up': False, 'dhcp': False, 'preup' : False, 'predown' : False }
globalIPpattern = re.compile("scope global")
ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
netRegexp = '[0-9]{1,2}'
macRegexp = '[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}'
ipLinePattern = re.compile('inet ' + ipRegexp + '/')
ipPattern = re.compile(ipRegexp)
netmaskLinePattern = re.compile('/' + netRegexp)
netmaskPattern = re.compile(netRegexp)
bcastLinePattern = re.compile(' brd ' + ipRegexp)
upPattern = re.compile('UP')
macPattern = re.compile(macRegexp)
macLinePattern = re.compile('link/ether ' + macRegexp)
for line in result.splitlines():
split = line.strip().split(' ',2)
if split[1][:-1] == iface:
up = self.regExpMatch(upPattern, split[2])
mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2]))
if up is not None:
data['up'] = True
					if iface != 'lo':
self.configuredInterfaces.append(iface)
if mac is not None:
data['mac'] = mac
if split[1] == iface:
if re.search(globalIPpattern, split[2]):
ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
netmask = self.calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2])))
bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2]))
if ip is not None:
data['ip'] = self.convertIP(ip)
if netmask is not None:
data['netmask'] = self.convertIP(netmask)
if bcast is not None:
data['bcast'] = self.convertIP(bcast)
if not data.has_key('ip'):
data['dhcp'] = True
data['ip'] = [0, 0, 0, 0]
data['netmask'] = [0, 0, 0, 0]
data['gateway'] = [0, 0, 0, 0]
cmd = "route -n | grep " + iface
self.Console.ePopen(cmd,self.routeFinished, [iface, data, callback])
def routeFinished(self, result, retval, extra_args):
(iface, data, callback) = extra_args
ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
ipPattern = re.compile(ipRegexp)
ipLinePattern = re.compile(ipRegexp)
for line in result.splitlines():
print line[0:7]
if line[0:7] == "0.0.0.0":
gateway = self.regExpMatch(ipPattern, line[16:31])
if gateway:
data['gateway'] = self.convertIP(gateway)
self.ifaces[iface] = data
self.loadNetworkConfig(iface,callback)
def writeNetworkConfig(self):
self.configuredInterfaces = []
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
for ifacename, iface in self.ifaces.items():
if iface['up']:
fp.write("auto " + ifacename + "\n")
self.configuredInterfaces.append(ifacename)
if iface['dhcp']:
fp.write("iface "+ ifacename +" inet dhcp\n")
fp.write(" hostname $(hostname)\n")
if not iface['dhcp']:
fp.write("iface "+ ifacename +" inet static\n")
fp.write(" hostname $(hostname)\n")
if iface.has_key('ip'):
# print tuple(iface['ip'])
fp.write(" address %d.%d.%d.%d\n" % tuple(iface['ip']))
fp.write(" netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
if iface.has_key('gateway'):
fp.write(" gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
if iface.has_key("configStrings"):
fp.write(iface["configStrings"])
if iface["preup"] is not False and not iface.has_key("configStrings"):
fp.write(iface["preup"])
if iface["predown"] is not False and not iface.has_key("configStrings"):
fp.write(iface["predown"])
fp.write("\n")
fp.close()
self.configuredNetworkAdapters = self.configuredInterfaces
self.writeNameserverConfig()
def writeNameserverConfig(self):
try:
os.system('rm -rf /etc/resolv.conf')
fp = file('/etc/resolv.conf', 'w')
for nameserver in self.nameservers:
fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver))
fp.close()
except:
print "[Network.py] interfaces - resolv.conf write failed"
def loadNetworkConfig(self,iface,callback = None):
interfaces = []
# parse the interfaces-file
try:
fp = file('/etc/network/interfaces', 'r')
interfaces = fp.readlines()
fp.close()
except:
print "[Network.py] interfaces - opening failed"
ifaces = {}
currif = ""
for i in interfaces:
split = i.strip().split(' ')
if split[0] == "iface":
currif = split[1]
ifaces[currif] = {}
if len(split) == 4 and split[3] == "dhcp":
ifaces[currif]["dhcp"] = True
else:
ifaces[currif]["dhcp"] = False
if currif == iface: #read information only for available interfaces
if split[0] == "address":
ifaces[currif]["address"] = map(int, split[1].split('.'))
if self.ifaces[currif].has_key("ip"):
if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["ip"] = map(int, split[1].split('.'))
if split[0] == "netmask":
ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if self.ifaces[currif].has_key("netmask"):
if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if split[0] == "gateway":
ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if self.ifaces[currif].has_key("gateway"):
if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if split[0] == "pre-up":
if self.ifaces[currif].has_key("preup"):
self.ifaces[currif]["preup"] = i
if split[0] in ("pre-down","post-down"):
if self.ifaces[currif].has_key("predown"):
self.ifaces[currif]["predown"] = i
for ifacename, iface in ifaces.items():
if self.ifaces.has_key(ifacename):
self.ifaces[ifacename]["dhcp"] = iface["dhcp"]
if self.Console:
if len(self.Console.appContainers) == 0:
# save configured interfacelist
self.configuredNetworkAdapters = self.configuredInterfaces
# load ns only once
self.loadNameserverConfig()
# print "read configured interface:", ifaces
# print "self.ifaces after loading:", self.ifaces
self.config_ready = True
self.msgPlugins()
if callback is not None:
callback(True)
def loadNameserverConfig(self):
ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
nameserverPattern = re.compile("nameserver +" + ipRegexp)
ipPattern = re.compile(ipRegexp)
resolv = []
try:
fp = file('/etc/resolv.conf', 'r')
resolv = fp.readlines()
fp.close()
self.nameservers = []
except:
print "[Network.py] resolv.conf - opening failed"
for line in resolv:
if self.regExpMatch(nameserverPattern, line) is not None:
ip = self.regExpMatch(ipPattern, line)
if ip:
self.nameservers.append(self.convertIP(ip))
# print "nameservers:", self.nameservers
def getInstalledAdapters(self):
return [x for x in os.listdir('/sys/class/net') if not self.isBlacklisted(x)]
def getConfiguredAdapters(self):
return self.configuredNetworkAdapters
def getNumberOfAdapters(self):
return len(self.ifaces)
def getFriendlyAdapterName(self, x):
if x in self.friendlyNames.keys():
return self.friendlyNames.get(x, x)
self.friendlyNames[x] = self.getFriendlyAdapterNaming(x)
return self.friendlyNames.get(x, x) # when we have no friendly name, use adapter name
def getFriendlyAdapterNaming(self, iface):
name = None
if self.isWirelessInterface(iface):
if iface not in self.wlan_interfaces:
name = _("WLAN connection")
if len(self.wlan_interfaces):
name += " " + str(len(self.wlan_interfaces)+1)
self.wlan_interfaces.append(iface)
else:
if iface not in self.lan_interfaces:
if getBoxType() == "et10000" and iface == "eth1":
name = _("VLAN connection")
else:
name = _("LAN connection")
if len(self.lan_interfaces) and not getBoxType() == "et10000" and not iface == "eth1":
name += " " + str(len(self.lan_interfaces)+1)
self.lan_interfaces.append(iface)
return name
def getFriendlyAdapterDescription(self, iface):
if not self.isWirelessInterface(iface):
return _('Ethernet network interface')
moduledir = self.getWlanModuleDir(iface)
if moduledir:
name = os.path.basename(os.path.realpath(moduledir))
if name in ('ath_pci','ath5k'):
name = 'Atheros'
elif name in ('rt73','rt73usb','rt3070sta'):
name = 'Ralink'
elif name == 'zd1211b':
name = 'Zydas'
elif name == 'r871x_usb_drv':
name = 'Realtek'
else:
name = _('Unknown')
return name + ' ' + _('wireless network interface')
def getAdapterName(self, iface):
return iface
def getAdapterList(self):
return self.ifaces.keys()
def getAdapterAttribute(self, iface, attribute):
return self.ifaces.get(iface, {}).get(attribute)
def setAdapterAttribute(self, iface, attribute, value):
# print "setting for adapter", iface, "attribute", attribute, " to value", value
if self.ifaces.has_key(iface):
self.ifaces[iface][attribute] = value
def removeAdapterAttribute(self, iface, attribute):
if self.ifaces.has_key(iface):
if self.ifaces[iface].has_key(attribute):
del self.ifaces[iface][attribute]
def getNameserverList(self):
if len(self.nameservers) == 0:
return [[0, 0, 0, 0], [0, 0, 0, 0]]
else:
return self.nameservers
def clearNameservers(self):
self.nameservers = []
def addNameserver(self, nameserver):
if nameserver not in self.nameservers:
self.nameservers.append(nameserver)
def removeNameserver(self, nameserver):
if nameserver in self.nameservers:
self.nameservers.remove(nameserver)
def changeNameserver(self, oldnameserver, newnameserver):
if oldnameserver in self.nameservers:
for i in range(len(self.nameservers)):
if self.nameservers[i] == oldnameserver:
self.nameservers[i] = newnameserver
def resetNetworkConfig(self, mode='lan', callback = None):
self.resetNetworkConsole = Console()
self.commands = []
self.commands.append("/etc/init.d/avahi-daemon stop")
for iface in self.ifaces.keys():
if iface != 'eth0' or not self.onRemoteRootFS():
self.commands.append("ip addr flush dev " + iface + " scope global")
self.commands.append("/etc/init.d/networking stop")
self.commands.append("killall -9 udhcpc")
self.commands.append("rm /var/run/udhcpc*")
self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinishedCB, [mode, callback], debug=True)
def resetNetworkFinishedCB(self, extra_args):
(mode, callback) = extra_args
if len(self.resetNetworkConsole.appContainers) == 0:
self.writeDefaultNetworkConfig(mode, callback)
def writeDefaultNetworkConfig(self,mode='lan', callback = None):
fp = file('/etc/network/interfaces', 'w')
fp.write("# automatically generated by enigma2\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
if mode == 'wlan':
fp.write("auto wlan0\n")
fp.write("iface wlan0 inet dhcp\n")
if mode == 'wlan-mpci':
fp.write("auto ath0\n")
fp.write("iface ath0 inet dhcp\n")
if mode == 'lan':
fp.write("auto eth0\n")
fp.write("iface eth0 inet dhcp\n")
fp.write("\n")
fp.close()
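		# For illustration, with mode == 'lan' the file written above contains:
		#   # automatically generated by enigma2
		#   # do NOT change manually!
		#
		#   auto lo
		#   iface lo inet loopback
		#
		#   auto eth0
		#   iface eth0 inet dhcp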
self.resetNetworkConsole = Console()
self.commands = []
if mode == 'wlan':
self.commands.append("ifconfig eth0 down")
self.commands.append("ifconfig ath0 down")
self.commands.append("ifconfig wlan0 up")
if mode == 'wlan-mpci':
self.commands.append("ifconfig eth0 down")
self.commands.append("ifconfig wlan0 down")
self.commands.append("ifconfig ath0 up")
if mode == 'lan':
self.commands.append("ifconfig eth0 up")
self.commands.append("ifconfig wlan0 down")
self.commands.append("ifconfig ath0 down")
self.commands.append("/etc/init.d/avahi-daemon start")
self.resetNetworkConsole.eBatch(self.commands, self.resetNetworkFinished, [mode,callback], debug=True)
def resetNetworkFinished(self,extra_args):
(mode, callback) = extra_args
if len(self.resetNetworkConsole.appContainers) == 0:
if callback is not None:
callback(True,mode)
def checkNetworkState(self,statecallback):
self.NetworkState = 0
cmd1 = "ping -c 1 www.google.de"
cmd2 = "ping -c 1 www.google.com"
cmd3 = "ping -c 1 www.google.nl"
self.PingConsole = Console()
self.PingConsole.ePopen(cmd1, self.checkNetworkStateFinished,statecallback)
self.PingConsole.ePopen(cmd2, self.checkNetworkStateFinished,statecallback)
self.PingConsole.ePopen(cmd3, self.checkNetworkStateFinished,statecallback)
def checkNetworkStateFinished(self, result, retval,extra_args):
(statecallback) = extra_args
if self.PingConsole is not None:
if retval == 0:
self.PingConsole = None
statecallback(self.NetworkState)
else:
self.NetworkState += 1
if len(self.PingConsole.appContainers) == 0:
statecallback(self.NetworkState)
def restartNetwork(self,callback = None):
self.restartConsole = Console()
self.config_ready = False
self.msgPlugins()
self.commands = []
self.commands.append("/etc/init.d/avahi-daemon stop")
for iface in self.ifaces.keys():
if iface != 'eth0' or not self.onRemoteRootFS():
self.commands.append("ifdown " + iface)
self.commands.append("ip addr flush dev " + iface + " scope global")
self.commands.append("/etc/init.d/networking stop")
self.commands.append("killall -9 udhcpc")
self.commands.append("rm /var/run/udhcpc*")
self.commands.append("/etc/init.d/networking start")
self.commands.append("/etc/init.d/avahi-daemon start")
self.restartConsole.eBatch(self.commands, self.restartNetworkFinished, callback, debug=True)
def restartNetworkFinished(self,extra_args):
( callback ) = extra_args
if callback is not None:
try:
callback(True)
except:
pass
def getLinkState(self,iface,callback):
cmd = self.ethtool_bin + " " + iface
self.LinkConsole = Console()
self.LinkConsole.ePopen(cmd, self.getLinkStateFinished,callback)
def getLinkStateFinished(self, result, retval,extra_args):
(callback) = extra_args
if self.LinkConsole is not None:
if len(self.LinkConsole.appContainers) == 0:
callback(result)
def stopPingConsole(self):
if self.PingConsole is not None:
if len(self.PingConsole.appContainers):
for name in self.PingConsole.appContainers.keys():
self.PingConsole.kill(name)
def stopLinkStateConsole(self):
if self.LinkConsole is not None:
if len(self.LinkConsole.appContainers):
for name in self.LinkConsole.appContainers.keys():
self.LinkConsole.kill(name)
def stopDNSConsole(self):
if self.DnsConsole is not None:
if len(self.DnsConsole.appContainers):
for name in self.DnsConsole.appContainers.keys():
self.DnsConsole.kill(name)
def stopRestartConsole(self):
if self.restartConsole is not None:
if len(self.restartConsole.appContainers):
for name in self.restartConsole.appContainers.keys():
self.restartConsole.kill(name)
def stopGetInterfacesConsole(self):
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
def stopDeactivateInterfaceConsole(self):
if self.deactivateInterfaceConsole is not None:
self.deactivateInterfaceConsole.killAll()
self.deactivateInterfaceConsole = None
def stopActivateInterfaceConsole(self):
if self.activateInterfaceConsole is not None:
self.activateInterfaceConsole.killAll()
self.activateInterfaceConsole = None
def checkforInterface(self, iface):
return self.getAdapterAttribute(iface, 'up')
def checkDNSLookup(self,statecallback):
cmd1 = "nslookup www.dream-multimedia-tv.de"
cmd2 = "nslookup www.heise.de"
cmd3 = "nslookup www.google.de"
self.DnsConsole = Console()
self.DnsConsole.ePopen(cmd1, self.checkDNSLookupFinished,statecallback)
self.DnsConsole.ePopen(cmd2, self.checkDNSLookupFinished,statecallback)
self.DnsConsole.ePopen(cmd3, self.checkDNSLookupFinished,statecallback)
def checkDNSLookupFinished(self, result, retval,extra_args):
(statecallback) = extra_args
if self.DnsConsole is not None:
if retval == 0:
self.DnsConsole = None
statecallback(self.DnsState)
else:
self.DnsState += 1
if len(self.DnsConsole.appContainers) == 0:
statecallback(self.DnsState)
def deactivateInterface(self,ifaces,callback = None):
self.config_ready = False
self.msgPlugins()
commands = []
def buildCommands(iface):
commands.append("ifdown " + iface)
commands.append("ip addr flush dev " + iface + " scope global")
#wpa_supplicant sometimes doesn't quit properly on SIGTERM
if os.path.exists('/var/run/wpa_supplicant/'+ iface):
commands.append("wpa_cli -i" + iface + " terminate")
if not self.deactivateInterfaceConsole:
self.deactivateInterfaceConsole = Console()
if isinstance(ifaces, (list, tuple)):
for iface in ifaces:
if iface != 'eth0' or not self.onRemoteRootFS():
buildCommands(iface)
else:
if ifaces == 'eth0' and self.onRemoteRootFS():
if callback is not None:
callback(True)
return
buildCommands(ifaces)
self.deactivateInterfaceConsole.eBatch(commands, self.deactivateInterfaceFinished, [ifaces,callback], debug=True)
def deactivateInterfaceFinished(self,extra_args):
(ifaces, callback) = extra_args
def checkCommandResult(iface):
if self.deactivateInterfaceConsole and self.deactivateInterfaceConsole.appResults.has_key("ifdown " + iface):
result = str(self.deactivateInterfaceConsole.appResults.get("ifdown " + iface)).strip("\n")
if result == "ifdown: interface " + iface + " not configured":
return False
else:
return True
#ifdown sometimes can't get the interface down.
if isinstance(ifaces, (list, tuple)):
for iface in ifaces:
if checkCommandResult(iface) is False:
Console().ePopen(("ifconfig " + iface + " down" ))
else:
if checkCommandResult(ifaces) is False:
Console().ePopen(("ifconfig " + ifaces + " down" ))
if self.deactivateInterfaceConsole:
if len(self.deactivateInterfaceConsole.appContainers) == 0:
if callback is not None:
callback(True)
def activateInterface(self,iface,callback = None):
if self.config_ready:
self.config_ready = False
self.msgPlugins()
if iface == 'eth0' and self.onRemoteRootFS():
if callback is not None:
callback(True)
return
if not self.activateInterfaceConsole:
self.activateInterfaceConsole = Console()
commands = ["ifup " + iface]
self.activateInterfaceConsole.eBatch(commands, self.activateInterfaceFinished, callback, debug=True)
def activateInterfaceFinished(self,extra_args):
callback = extra_args
if self.activateInterfaceConsole:
if len(self.activateInterfaceConsole.appContainers) == 0:
if callback is not None:
try:
callback(True)
except:
pass
def sysfsPath(self, iface):
return '/sys/class/net/' + iface
def isWirelessInterface(self, iface):
if iface in self.wlan_interfaces:
return True
if os.path.isdir(self.sysfsPath(iface) + '/wireless'):
return True
# r871x_usb_drv on kernel 2.6.12 is not identifiable over /sys/class/net/'ifacename'/wireless so look also inside /proc/net/wireless
device = re.compile('[a-z]{2,}[0-9]*:')
ifnames = []
fp = open('/proc/net/wireless', 'r')
for line in fp:
try:
ifnames.append(device.search(line).group()[:-1])
except AttributeError:
pass
fp.close()
if iface in ifnames:
return True
return False
def getWlanModuleDir(self, iface = None):
devicedir = self.sysfsPath(iface) + '/device'
moduledir = devicedir + '/driver/module'
if os.path.isdir(moduledir):
return moduledir
# identification is not possible over default moduledir
for x in os.listdir(devicedir):
# rt3070 on kernel 2.6.18 registers wireless devices as usb_device (e.g. 1-1.3:1.0) and identification is only possible over /sys/class/net/'ifacename'/device/1-xxx
if x.startswith("1-"):
moduledir = devicedir + '/' + x + '/driver/module'
if os.path.isdir(moduledir):
return moduledir
# rt73, zd1211b, r871x_usb_drv on kernel 2.6.12 can be identified over /sys/class/net/'ifacename'/device/driver, so look also here
moduledir = devicedir + '/driver'
if os.path.isdir(moduledir):
return moduledir
return None
def detectWlanModule(self, iface = None):
if not self.isWirelessInterface(iface):
return None
devicedir = self.sysfsPath(iface) + '/device'
if os.path.isdir(devicedir + '/ieee80211'):
return 'nl80211'
moduledir = self.getWlanModuleDir(iface)
if moduledir:
module = os.path.basename(os.path.realpath(moduledir))
if module in ('ath_pci','ath5k'):
return 'madwifi'
			if module == 'rt73':
				return 'ralink'
if module == 'zd1211b':
return 'zydas'
return 'wext'
def calc_netmask(self,nmask):
from struct import pack, unpack
from socket import inet_ntoa, inet_aton
mask = 1L<<31
xnet = (1L<<32)-1
cidr_range = range(0, 32)
cidr = long(nmask)
if cidr not in cidr_range:
print 'cidr invalid: %d' % cidr
return None
else:
nm = ((1L<<cidr)-1)<<(32-cidr)
netmask = str(inet_ntoa(pack('>L', nm)))
return netmask
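	# Illustrative examples for calc_netmask (prefix length passed as a string):
	#   calc_netmask("8")  -> "255.0.0.0"
	#   calc_netmask("16") -> "255.255.0.0"
	#   calc_netmask("24") -> "255.255.255.0"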
def msgPlugins(self):
if self.config_ready is not None:
for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ):
p(reason=self.config_ready)
def hotplug(self, event):
interface = event['INTERFACE']
if self.isBlacklisted(interface):
return
action = event['ACTION']
if action == "add":
print "[Network] Add new interface:", interface
self.getAddrInet(interface, None)
elif action == "remove":
print "[Network] Removed interface:", interface
try:
del self.ifaces[interface]
except KeyError:
pass
iNetwork = Network()
def InitNetwork():
pass
| gpl-2.0 | -3,468,733,016,651,992,000 | 32.786421 | 167 | 0.689036 | false |
vhaasteren/pysolvepulsar | setup.py | 1 | 1445 | import os
import sys
import numpy
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
setup(
name="pysolvepulsar",
version='2015.05',
description="Algorithmic timing package",
long_description=open("README.md").read() + "\n\n"
+ "Changelog\n"
+ "---------\n\n"
+ open("HISTORY.md").read(),
author="Rutger van Haasteren",
author_email="[email protected]",
url="http://github.com/vhaasteren/pysolvepulsar/",
license="GPLv3",
package_data={"": ["README", "LICENSE", "AUTHORS.md"]},
install_requires=["numpy", "scipy"],
include_package_data=True,
packages=["pysolvepulsar"],
py_modules = ['pysolvepulsar.pysolvepulsar',
'pysolvepulsar.candidate',
'pysolvepulsar.rankreduced',
'pysolvepulsar.units',
'pysolvepulsar.linearfitter'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Programming Language :: Python",
]
)
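# Illustrative build/install commands (not part of the original file):
#   python setup.py sdist   # build a source distribution
#   pip install .           # install the package and its numpy/scipy dependencies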
| gpl-3.0 | 546,810,181,393,561,340 | 28.489796 | 70 | 0.611073 | false |
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/lib/filebrowsebutton.py | 1 | 17383 | #----------------------------------------------------------------------
# Name: wxPython.lib.filebrowsebutton
# Purpose: Composite controls that provide a Browse button next to
# either a wxTextCtrl or a wxComboBox. The Browse button
# launches a wxFileDialog and loads the result into the
# other control.
#
# Author: Mike Fletcher
#
# RCS-ID: $Id$
# Copyright: (c) 2000 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/02/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 Compatibility changes
#
import os
import types
import wx
#----------------------------------------------------------------------
class FileBrowseButton(wx.Panel):
"""
A control to allow the user to type in a filename or browse with
    the standard file dialog to select a file
"""
def __init__ (self, parent, id= -1,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText= "File Entry:",
buttonText= "Browse",
toolTip= "Type filename or click browse to choose file",
# following are the values for a file dialog box
dialogTitle = "Choose a file",
startDirectory = ".",
initialValue = "",
fileMask = "*.*",
fileMode = wx.FD_OPEN,
# callback for when value changes (optional)
changeCallback= lambda x:x,
labelWidth = 0,
name = 'fileBrowseButton',
):
"""
:param labelText: Text for label to left of text field
:param buttonText: Text for button which launches the file dialog
:param toolTip: Help text
:param dialogTitle: Title used in file dialog
:param startDirectory: Default directory for file dialog startup
:param fileMask: File mask (glob pattern, such as *.*) to use in file dialog
:param fileMode: wx.FD_OPEN or wx.FD_SAVE, indicates type of file dialog to use
:param changeCallback: Optional callback called for all changes in value of the control
:param labelWidth: Width of the label
"""
# store variables
self.labelText = labelText
self.buttonText = buttonText
self.toolTip = toolTip
self.dialogTitle = dialogTitle
self.startDirectory = startDirectory
self.initialValue = initialValue
self.fileMask = fileMask
self.fileMode = fileMode
self.changeCallback = changeCallback
self.callCallback = True
self.labelWidth = labelWidth
# create the dialog
self.createDialog(parent, id, pos, size, style, name )
# Setting a value causes the changeCallback to be called.
# In this case that would be before the return of the
# constructor. Not good. So a default value on
# SetValue is used to disable the callback
self.SetValue( initialValue, 0)
def createDialog( self, parent, id, pos, size, style, name ):
"""Setup the graphic representation of the dialog"""
wx.Panel.__init__ (self, parent, id, pos, size, style, name)
self.SetMinSize(size) # play nice with sizers
box = wx.BoxSizer(wx.HORIZONTAL)
self.label = self.createLabel( )
box.Add( self.label, 0, wx.CENTER )
self.textControl = self.createTextControl()
box.Add( self.textControl, 1, wx.LEFT|wx.CENTER, 5)
self.browseButton = self.createBrowseButton()
box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5)
# add a border around the whole thing and resize the panel to fit
outsidebox = wx.BoxSizer(wx.VERTICAL)
outsidebox.Add(box, 1, wx.EXPAND|wx.ALL, 3)
outsidebox.Fit(self)
self.SetAutoLayout(True)
self.SetSizer( outsidebox )
self.Layout()
if type( size ) == types.TupleType:
size = apply( wx.Size, size)
self.SetDimensions(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING)
# if size.width != -1 or size.height != -1:
# self.SetSize(size)
def SetBackgroundColour(self,color):
wx.Panel.SetBackgroundColour(self,color)
self.label.SetBackgroundColour(color)
def createLabel( self ):
"""Create the label/caption"""
label = wx.StaticText(self, -1, self.labelText, style =wx.ALIGN_RIGHT )
font = label.GetFont()
w, h, d, e = self.GetFullTextExtent(self.labelText, font)
if self.labelWidth > 0:
label.SetSize((self.labelWidth+5, h))
else:
label.SetSize((w+5, h))
return label
def createTextControl( self):
"""Create the text control"""
textControl = wx.TextCtrl(self, -1)
textControl.SetToolTipString( self.toolTip )
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
return textControl
def OnChanged(self, evt):
if self.callCallback and self.changeCallback:
self.changeCallback(evt)
def createBrowseButton( self):
"""Create the browse-button control"""
button =wx.Button(self, -1, self.buttonText)
button.SetToolTipString( self.toolTip )
button.Bind(wx.EVT_BUTTON, self.OnBrowse)
return button
def OnBrowse (self, event = None):
""" Going to browse for file... """
current = self.GetValue()
directory = os.path.split(current)
if os.path.isdir( current):
directory = current
current = ''
elif directory and os.path.isdir( directory[0] ):
current = directory[1]
directory = directory [0]
else:
directory = self.startDirectory
current = ''
dlg = wx.FileDialog(self, self.dialogTitle, directory, current,
self.fileMask, self.fileMode)
if dlg.ShowModal() == wx.ID_OK:
self.SetValue(dlg.GetPath())
dlg.Destroy()
def GetValue (self):
"""
retrieve current value of text control
"""
return self.textControl.GetValue()
def SetValue (self, value, callBack=1):
"""set current value of text control"""
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
def GetLabel( self ):
""" Retrieve the label's current text """
return self.label.GetLabel()
def SetLabel( self, value ):
""" Set the label's current text """
rvalue = self.label.SetLabel( value )
self.Refresh( True )
return rvalue
class FileBrowseButtonWithHistory( FileBrowseButton ):
"""
with following additions:
__init__(..., history=None)
history -- optional list of paths for initial history drop-down
(must be passed by name, not a positional argument)
                If history is callable, it must return a list that is used
                for the history drop-down
changeCallback -- as for FileBrowseButton, but with a work-around
for win32 systems which don't appear to create wx.EVT_COMBOBOX
events properly. There is a (slight) chance that this work-around
will cause some systems to create two events for each Combobox
selection. If you discover this condition, please report it!
        Otherwise the arguments are the same as for FileBrowseButton.__init__.
GetHistoryControl()
Return reference to the control which implements interfaces
required for manipulating the history list. See GetHistoryControl
documentation for description of what that interface is.
GetHistory()
Return current history list
SetHistory( value=(), selectionIndex = None )
        Set the current history list; if selectionIndex is not None, select that index
"""
def __init__( self, *arguments, **namedarguments):
self.history = namedarguments.get( "history" )
if self.history:
del namedarguments["history"]
self.historyCallBack=None
if callable(self.history):
self.historyCallBack=self.history
self.history=None
name = namedarguments.get('name', 'fileBrowseButtonWithHistory')
namedarguments['name'] = name
FileBrowseButton.__init__(self, *arguments, **namedarguments)
def createTextControl( self):
"""Create the text control"""
textControl = wx.ComboBox(self, -1, style = wx.CB_DROPDOWN )
textControl.SetToolTipString( self.toolTip )
textControl.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
if self.changeCallback:
textControl.Bind(wx.EVT_TEXT, self.OnChanged)
textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged)
if self.history:
history=self.history
self.history=None
self.SetHistory( history, control=textControl)
return textControl
def GetHistoryControl( self ):
"""
Return a pointer to the control which provides (at least)
the following methods for manipulating the history list:
Append( item ) -- add item
Clear() -- clear all items
Delete( index ) -- 0-based index to delete from list
SetSelection( index ) -- 0-based index to select in list
Semantics of the methods follow those for the wxComboBox control
"""
return self.textControl
def SetHistory( self, value=(), selectionIndex = None, control=None ):
"""Set the current history list"""
if control is None:
control = self.GetHistoryControl()
if self.history == value:
return
self.history = value
        # Preserve the currently entered value while the old history entries are cleared.
tempValue=control.GetValue()
# clear previous values
control.Clear()
control.SetValue(tempValue)
# walk through, appending new values
for path in value:
control.Append( path )
if selectionIndex is not None:
control.SetSelection( selectionIndex )
def GetHistory( self ):
"""Return the current history list"""
if self.historyCallBack != None:
return self.historyCallBack()
elif self.history:
return list( self.history )
else:
return []
def OnSetFocus(self, event):
"""When the history scroll is selected, update the history"""
if self.historyCallBack != None:
self.SetHistory( self.historyCallBack(), control=self.textControl)
event.Skip()
if wx.Platform == "__WXMSW__":
def SetValue (self, value, callBack=1):
""" Convenient setting of text control value, works
around limitation of wx.ComboBox """
save = self.callCallback
self.callCallback = callBack
self.textControl.SetValue(value)
self.callCallback = save
# Hack to call an event handler
class LocalEvent:
def __init__(self, string):
self._string=string
def GetString(self):
return self._string
if callBack==1:
# The callback wasn't being called when SetValue was used ??
# So added this explicit call to it
self.changeCallback(LocalEvent(value))
class DirBrowseButton(FileBrowseButton):
def __init__(self, parent, id = -1,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = wx.TAB_TRAVERSAL,
labelText = 'Select a directory:',
buttonText = 'Browse',
toolTip = 'Type directory name or browse to select',
dialogTitle = '',
startDirectory = '.',
changeCallback = None,
dialogClass = wx.DirDialog,
newDirectory = False,
name = 'dirBrowseButton'):
FileBrowseButton.__init__(self, parent, id, pos, size, style,
labelText, buttonText, toolTip,
dialogTitle, startDirectory,
changeCallback = changeCallback,
name = name)
self.dialogClass = dialogClass
self.newDirectory = newDirectory
#
def OnBrowse(self, ev = None):
style=0
if not self.newDirectory:
style |= wx.DD_DIR_MUST_EXIST
dialog = self.dialogClass(self,
message = self.dialogTitle,
defaultPath = self.startDirectory,
style = style)
if dialog.ShowModal() == wx.ID_OK:
self.SetValue(dialog.GetPath())
dialog.Destroy()
#
#----------------------------------------------------------------------
if __name__ == "__main__":
#from skeletonbuilder import rulesfile
class SimpleCallback:
def __init__( self, tag ):
self.tag = tag
def __call__( self, event ):
print self.tag, event.GetString()
class DemoFrame( wx.Frame ):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "File entry with browse", size=(500,260))
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
panel = wx.Panel (self,-1)
innerbox = wx.BoxSizer(wx.VERTICAL)
control = FileBrowseButton(
panel,
initialValue = "z:\\temp",
)
innerbox.Add( control, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "With History",
initialValue = "d:\\temp",
history = ["c:\\temp", "c:\\tmp", "r:\\temp","z:\\temp"],
changeCallback= SimpleCallback( "With History" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
middlecontrol = FileBrowseButtonWithHistory(
panel,
labelText = "History callback",
initialValue = "d:\\temp",
history = self.historyCallBack,
changeCallback= SimpleCallback( "History callback" ),
)
innerbox.Add( middlecontrol, 0, wx.EXPAND )
self.bottomcontrol = control = FileBrowseButton(
panel,
labelText = "With Callback",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN ,
changeCallback= SimpleCallback( "With Callback" ),
)
innerbox.Add( control, 0, wx.EXPAND)
self.bottommostcontrol = control = DirBrowseButton(
panel,
labelText = "Simple dir browse button",
style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN)
innerbox.Add( control, 0, wx.EXPAND)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Label", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeLabel , id=ID)
ID = wx.NewId()
innerbox.Add( wx.Button( panel, ID,"Change Value", ), 1, wx.EXPAND)
self.Bind(wx.EVT_BUTTON, self.OnChangeValue, id=ID )
panel.SetAutoLayout(True)
panel.SetSizer( innerbox )
self.history={"c:\\temp":1, "c:\\tmp":1, "r:\\temp":1,"z:\\temp":1}
def historyCallBack(self):
keys=self.history.keys()
keys.sort()
return keys
def OnFileNameChangedHistory (self, event):
self.history[event.GetString ()]=1
def OnCloseMe(self, event):
self.Close(True)
def OnChangeLabel( self, event ):
self.bottomcontrol.SetLabel( "Label Updated" )
def OnChangeValue( self, event ):
self.bottomcontrol.SetValue( "r:\\somewhere\\over\\the\\rainbow.htm" )
def OnCloseWindow(self, event):
self.Destroy()
class DemoApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame = DemoFrame(None)
frame.Show(True)
self.SetTopWindow(frame)
return True
def test( ):
app = DemoApp(0)
app.MainLoop()
print 'Creating dialog'
test( )
| mit | 3,643,485,071,056,536,000 | 35.78913 | 95 | 0.54421 | false |
pombredanne/parakeet | parakeet/llvm_backend/llvm_helpers.py | 2 | 1087 | import llvm.core as llcore
from .. ndtypes import ScalarT, FloatT, Int32, Int64
from llvm_types import llvm_value_type
def const(python_scalar, parakeet_type):
assert isinstance(parakeet_type, ScalarT)
llvm_type = llvm_value_type(parakeet_type)
if isinstance(parakeet_type, FloatT):
return llcore.Constant.real(llvm_type, float(python_scalar))
else:
return llcore.Constant.int(llvm_type, int(python_scalar))
def int32(x):
"""Make LLVM constants of type int32"""
return const(x, Int32)
def int64(x):
return const(x, Int64)
def zero(llvm_t):
"""
Make a zero constant of either int or real type.
Doesn't (yet) work for vector constants!
"""
if isinstance(llvm_t, llcore.IntegerType):
return llcore.Constant.int(llvm_t, 0)
else:
return llcore.Constant.real(llvm_t, 0.0)
def one(llvm_t):
"""
Make a constant 1 of either int or real type.
Doesn't (yet) work for vector constants!
"""
if isinstance(llvm_t, llcore.IntegerType):
return llcore.Constant.int(llvm_t, 1)
else:
return llcore.Constant.real(llvm_t, 1.0)
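# Illustrative usage (assuming the llvmpy llvm.core API):
#   int32(7)                  -> i32 constant 7
#   int64(7)                  -> i64 constant 7
#   zero(llcore.Type.int(32)) -> i32 constant 0
#   one(llcore.Type.double()) -> double constant 1.0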
| bsd-3-clause | -8,042,694,416,398,621,000 | 25.536585 | 64 | 0.696412 | false |
compas-dev/compas | src/compas/geometry/shapes/cone.py | 1 | 7844 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import cos
from math import pi
from math import sin
from math import sqrt
from compas.utilities import pairwise
from compas.geometry import matrix_from_frame
from compas.geometry import transform_points
from compas.geometry import Circle
from compas.geometry import Frame
from compas.geometry import Plane
from compas.geometry.shapes._shape import Shape
class Cone(Shape):
"""A cone is defined by a circle and a height.
Parameters
----------
circle : tuple or :class:`compas.geometry.Circle`
The base circle of the cone.
height : float
The height of the cone.
Attributes
----------
plane : :class:`compas.geometry.Plane`
The plane containing the circle.
circle : :class:`compas.geometry.Circle`
The base circle of the cone.
radius : float
The radius of the base circle.
height : float
The height of the cone.
normal (read-only) : :class:`compas.geometry.Vector`
The normal of the base plane.
diameter : float
The diameter of the cone.
Examples
--------
>>> from compas.geometry import Plane
>>> from compas.geometry import Cone
>>> plane = Plane([0, 0, 0], [0, 0, 1])
>>> circle = Circle(plane, 5)
>>> cone = Cone(circle, 7)
"""
@property
def DATASCHEMA(self):
import schema
return schema.Schema({
'circle': {
'plane': Plane.DATASCHEMA.fget(None),
'radius': schema.And(float, lambda x: x > 0)
},
'height': schema.And(float, lambda x: x > 0)
})
@property
def JSONSCHEMANAME(self):
return 'cone'
__slots__ = ['_circle', '_height']
def __init__(self, circle, height, **kwargs):
super(Cone, self).__init__(**kwargs)
self._circle = None
self._height = None
self.circle = circle
self.height = height
@property
def data(self):
"""Returns the data dictionary that represents the cone.
Returns
-------
dict
The cone data.
"""
return {'circle': self.circle.data, 'height': self.height}
@data.setter
def data(self, data):
self.circle = Circle.from_data(data['circle'])
self.height = data['height']
@property
def plane(self):
"""Plane: The plane of the cone."""
return self.circle.plane
@plane.setter
def plane(self, plane):
self.circle.plane = Plane(*plane)
@property
def circle(self):
"""float: The circle of the cone."""
return self._circle
@circle.setter
def circle(self, circle):
self._circle = Circle(*circle)
@property
def radius(self):
"""float: The radius of the cone."""
return self.circle.radius
@radius.setter
def radius(self, radius):
self.circle.radius = float(radius)
@property
def height(self):
"""float: The height of the cone."""
return self._height
@height.setter
def height(self, height):
self._height = float(height)
@property
def normal(self):
"""Vector: The normal of the cone."""
return self.plane.normal
@property
def diameter(self):
"""float: The diameter of the cone."""
return self.circle.diameter
@property
def center(self):
"""Point: The center of the cone."""
return self.circle.center
@center.setter
def center(self, point):
self.circle.center = point
@property
def area(self):
"""Float: The surface area of the cone."""
r = self.circle.radius
return pi * r * (r + sqrt(self.height**2 + r**2))
@property
def volume(self):
"""Float: The volume of the cone."""
return pi * self.circle.radius**2 * (self.height / 3)
# ==========================================================================
# customisation
# ==========================================================================
def __repr__(self):
return 'Cone({0!r}, {1!r})'.format(self.circle, self.height)
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.circle
elif key == 1:
return self.height
else:
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.circle = value
elif key == 1:
self.height = value
else:
raise KeyError
def __iter__(self):
return iter([self.circle, self.height])
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_data(cls, data):
"""Construct a cone from its data representation.
Parameters
----------
data : :obj:`dict`
The data dictionary.
Returns
-------
Cone
The constructed cone.
Examples
--------
>>> from compas.geometry import Cone
>>> from compas.geometry import Circle
>>> from compas.geometry import Plane
>>> data = {'circle': Circle(Plane.worldXY(), 5).data, 'height': 7.}
>>> cone = Cone.from_data(data)
"""
cone = cls(Circle.from_data(data['circle']), data['height'])
return cone
# ==========================================================================
# methods
# ==========================================================================
def to_vertices_and_faces(self, u=10):
"""Returns a list of vertices and faces.
Parameters
----------
u : int, optional
Number of faces in the "u" direction.
Default is ``10``.
Returns
-------
(vertices, faces)
A list of vertex locations and a list of faces,
with each face defined as a list of indices into the list of vertices.
"""
if u < 3:
            raise ValueError('The value for u should be at least 3.')
vertices = [[0, 0, 0]]
a = 2 * pi / u
radius = self.circle.radius
for i in range(u):
x = radius * cos(i * a)
y = radius * sin(i * a)
vertices.append([x, y, 0])
vertices.append([0, 0, self.height])
frame = Frame.from_plane(self.circle.plane)
M = matrix_from_frame(frame)
vertices = transform_points(vertices, M)
faces = []
first = 0
last = len(vertices) - 1
for i, j in pairwise(range(1, last)):
faces.append([i, j, last])
faces.append([j, i, first])
faces.append([last - 1, 1, last])
faces.append([1, last - 1, first])
return vertices, faces
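        # Note (illustrative): for a given u the mesh has u + 2 vertices
        # (base centre, u rim points, apex) and 2 * u triangular faces.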
def transform(self, transformation):
"""Transform the cone.
Parameters
----------
transformation : :class:`Transformation`
The transformation used to transform the cone.
Examples
--------
>>> from compas.geometry import Frame
>>> from compas.geometry import Transformation
>>> from compas.geometry import Plane
>>> from compas.geometry import Cone
>>> from compas.geometry import Circle
>>> circle = Circle(Plane.worldXY(), 5)
>>> cone = Cone(circle, 7)
>>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
>>> T = Transformation.from_frame(frame)
>>> cone.transform(T)
"""
self.circle.transform(transformation)
| mit | 7,685,976,723,404,255,000 | 26.048276 | 82 | 0.516191 | false |
daveoncode/pyvaru | tests.py | 1 | 46654 | import pprint
import re
from datetime import datetime
from unittest import TestCase
from unittest import main as run_tests
from pyvaru import ValidationRule, Validator, ValidationResult, ValidationException, RuleGroup, \
InvalidRuleGroupException
from pyvaru.rules import TypeRule, FullStringRule, ChoiceRule, MinValueRule, MaxValueRule, MinLengthRule, \
MaxLengthRule, RangeRule, PatternRule, IntervalRule, PastDateRule, FutureDateRule, UniqueItemsRule
CUSTOM_MESSAGE = 'custom message'
class ValidationRuleTest(TestCase):
def test_rule_cannot_be_instantiated_because_is_abstract(self):
with self.assertRaises(TypeError):
ValidationRule('', 'test')
class ValidationResultTest(TestCase):
def test_string_conversion_returns_formatted_string_with_errors(self):
errors = {
'first_name': FullStringRule.default_error_message,
'last_name': FullStringRule.default_error_message,
}
result = ValidationResult(errors)
self.assertEqual(str(result), pprint.pformat({'errors': errors}))
def test_string_conversion_returns_formatted_string_without_errors(self):
result = ValidationResult()
self.assertEqual(str(result), pprint.pformat({'errors': {}}))
class ValidationExceptionTest(TestCase):
def test_string_conversion_returns_formatted_string_with_errors(self):
errors = {
'first_name': FullStringRule.default_error_message,
'last_name': FullStringRule.default_error_message,
}
result = ValidationResult(errors)
exception = ValidationException(result)
expected_string = pprint.pformat({'message': exception.message, 'errors': result.errors})
self.assertEqual(str(exception), expected_string)
class ValidatorTest(TestCase):
def test_validator_cannot_be_instantiated_because_is_abstract(self):
with self.assertRaises(TypeError):
Validator({})
def test_validate_returns_expected_result_if_no_rule_is_provided(self):
class MyValidator(Validator):
def get_rules(self) -> list:
return []
validator = MyValidator({})
result = validator.validate()
self.assertIsInstance(result, ValidationResult)
self.assertTrue(result.is_successful())
self.assertEqual(result.errors, {})
self.assertEqual(str(result), "{'errors': {}}")
def test_validate_returns_expected_result_if_rules_are_respected(self):
class GtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to > 5
class LtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to < 10
class ContainsRule(ValidationRule):
def apply(self) -> bool:
return 'hello' in self.apply_to
class MyValidator(Validator):
def get_rules(self) -> list:
data = self.data # type: dict
return [
GtRule(data['a'], 'Field A'),
LtRule(data['b'], 'Field B'),
ContainsRule(data['c'], 'Field C'),
]
validator = MyValidator({'a': 20, 'b': 1, 'c': 'hello world'})
result = validator.validate()
self.assertTrue(result.is_successful())
self.assertEqual(result.errors, {})
self.assertEqual(str(result), "{'errors': {}}")
def test_validate_returns_expected_result_if_rules_are_not_respected(self):
class GtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to > 200
class LtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to < 0
class ContainsRule(ValidationRule):
default_error_message = 'banana not found'
def apply(self) -> bool:
return 'banana' in self.apply_to
class MyValidator(Validator):
def get_rules(self) -> list:
data = self.data # type: dict
return [
GtRule(data['a'], 'Field A', 'GtRule not respected!'),
LtRule(data['b'], 'Field B'),
ContainsRule(data['c'], 'Field C'),
]
validator = MyValidator({'a': 20, 'b': 1, 'c': 'hello world'})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 3)
self.assertEqual(result.errors.get('Field A'), ['GtRule not respected!'])
self.assertEqual(result.errors.get('Field B'), [ValidationRule.default_error_message])
self.assertEqual(result.errors.get('Field C'), [ContainsRule.default_error_message])
self.assertEqual(str(result), pprint.pformat({'errors': result.errors}))
def test_validator_as_context_processor_with_failures(self):
class GtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to > 200
class LtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to < 0
class ContainsRule(ValidationRule):
default_error_message = 'banana not found'
def apply(self) -> bool:
return 'banana' in self.apply_to
class MyValidator(Validator):
def get_rules(self) -> list:
data = self.data # type: dict
return [
GtRule(data['a'], 'Field A', 'GtRule not respected!'),
LtRule(data['b'], 'Field B'),
ContainsRule(data['c'], 'Field C'),
]
inner_code_calls = 0
with self.assertRaises(ValidationException) as raise_context:
with MyValidator({'a': 20, 'b': 1, 'c': 'hello world'}):
inner_code_calls += 1
errors = raise_context.exception.validation_result.errors
self.assertEqual(inner_code_calls, 0)
self.assertIsInstance(errors, dict)
self.assertEqual(errors.get('Field A'), ['GtRule not respected!'])
self.assertEqual(errors.get('Field B'), [ValidationRule.default_error_message])
self.assertEqual(errors.get('Field C'), [ContainsRule.default_error_message])
expected_string_value = pprint.pformat({'message': raise_context.exception.message, 'errors': errors})
self.assertEqual(str(raise_context.exception), expected_string_value)
def test_validator_as_context_processor_without_failures(self):
class GtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to > 5
class LtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to < 10
class ContainsRule(ValidationRule):
def apply(self) -> bool:
return 'hello' in self.apply_to
class MyValidator(Validator):
def get_rules(self) -> list:
data = self.data # type: dict
return [
GtRule(data['a'], 'Field A'),
LtRule(data['b'], 'Field B'),
ContainsRule(data['c'], 'Field C'),
]
with MyValidator({'a': 20, 'b': 1, 'c': 'hello world'}) as validator:
self.assertIsInstance(validator, MyValidator)
def test_multiple_rules_applied_to_the_same_field(self):
class GtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to > 200
class LtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to < 0
class MyValidator(Validator):
def get_rules(self) -> list:
data = self.data # type: dict
return [
GtRule(data['a'], 'Field A', 'GtRuleFail'),
LtRule(data['a'], 'Field A', 'LtRuleFail'),
]
validator = MyValidator({'a': 100})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.errors.get('Field A'), ['GtRuleFail', 'LtRuleFail'])
def test_rules_processing_is_skipped_if_a_failing_rule_requires_it(self):
class GtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to > 200
class LtRule(ValidationRule):
def apply(self) -> bool:
return self.apply_to < 0
class MyValidator(Validator):
def get_rules(self) -> list:
data = self.data # type: dict
return [
GtRule(data['a'], 'Field A', 'GtRuleFail', stop_if_invalid=True),
LtRule(data['a'], 'Field A', 'LtRuleFail'),
]
validator = MyValidator({'a': 100})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.errors.get('Field A'), ['GtRuleFail'])
def test_validator_handle_possible_exceptions_in_get_rules_as_expected(self):
class DangerValidator(Validator):
def get_rules(self) -> list:
return [
FullStringRule(self.data.name, 'name')
]
# normal test
validator = DangerValidator({'name': 'Dave'})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(list(result.errors.keys()), ['get_rules'])
self.assertIsInstance(result.errors.get('get_rules'), list)
self.assertEqual(len(result.errors.get('get_rules')), 1)
self.assertIsInstance(result.errors.get('get_rules', [])[0], str)
# test as context processor
with self.assertRaises(ValidationException) as exception_context:
with DangerValidator({'name': 'Dave'}):
pass
exception_result = exception_context.exception.validation_result
self.assertFalse(exception_result.is_successful())
self.assertEqual(len(exception_result.errors), 1)
self.assertEqual(list(exception_result.errors.keys()), ['get_rules'])
self.assertIsInstance(exception_result.errors.get('get_rules'), list)
self.assertEqual(len(exception_result.errors.get('get_rules')), 1)
self.assertIsInstance(exception_result.errors.get('get_rules', [])[0], str)
def test_without_lambdas_stop_if_invalid_does_not_prevent_errors_report(self):
"""
        An exception is caught in get_rules() instead of the TypeRule violation we might expect, since the
        data is accessed as soon as the method is called.
"""
class MyModel:
name = 'Foo'
class DangerValidator(Validator):
def get_rules(self) -> list:
return [
TypeRule(self.data, 'data', MyModel, stop_if_invalid=True),
FullStringRule(self.data.name, 'name'),
]
validator = DangerValidator({'name': 'Foo'})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(list(result.errors.keys()), ['get_rules'])
def test_by_using_lambda_and_stop_if_invalid_no_exception_is_reported(self):
"""
        No exception is caught, since data access happens only after get_rules() has returned, and stop_if_invalid prevents the failing access from ever being evaluated.
"""
class MyModel:
name = 'Foo'
class DangerValidator(Validator):
def get_rules(self) -> list:
return [
TypeRule(lambda: self.data, 'data', MyModel, stop_if_invalid=True),
FullStringRule(lambda: self.data.name, 'name'),
]
validator = DangerValidator({'name': 'Foo'})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(list(result.errors.keys()), ['data'])
def test_validator_catch_and_store_errors_that_may_occour_in_rule_apply(self):
class RuleA(ValidationRule):
def apply(self):
raise NotImplementedError
class RuleB(ValidationRule):
def apply(self):
raise ZeroDivisionError
class MyValidator(Validator):
def get_rules(self):
return [
RuleA('', 'field_a'),
RuleB('', 'field_b'),
]
validator = MyValidator({})
result = validator.validate()
self.assertFalse(result.is_successful())
self.assertEqual(len(result.errors), 2)
try:
raise NotImplementedError
except NotImplementedError as e:
expected_a = [str(e)]
self.assertEqual(result.errors.get('field_a'), expected_a)
try:
raise ZeroDivisionError
except ZeroDivisionError as e:
expected_b = [str(e)]
self.assertEqual(result.errors.get('field_b'), expected_b)
class TypeRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
rule = TypeRule({'a': 1, 'b': 2}, 'my_object', dict)
self.assertTrue(rule.apply())
def test_rule_supports_lambda_expressions(self):
rule = TypeRule(lambda: {'a': 1, 'b': 2}, 'my_object', dict)
self.assertTrue(rule.apply())
def test_rule_returns_true_if_type_is_a_subtype(self):
class BaseClass:
pass
class SubClass(BaseClass):
def __init__(self):
super().__init__()
pass
rule = TypeRule(SubClass(), 'my_object', BaseClass)
self.assertTrue(rule.apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(TypeRule([1, 2, 3], 'my_object', dict).apply())
self.assertFalse(TypeRule(123, 'my_object', dict).apply())
self.assertFalse(TypeRule('123', 'my_object', dict).apply())
self.assertFalse(TypeRule(True, 'my_object', dict).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = TypeRule({}, 'my_object', dict)
self.assertEqual(rule.get_error_message(), TypeRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = TypeRule({}, 'my_object', dict, CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because type is right:
negated_rule = ~ TypeRule({'a': 1, 'b': 2}, 'my_object', dict)
self.assertFalse(negated_rule.apply())
# since negated, pass because type is wrong:
negated_rule_2 = ~ TypeRule('banana', 'my_object', dict)
self.assertTrue(negated_rule_2.apply())
class RuleGroupTest(TestCase):
def test_bad_configuration_of_rules_raise_exception(self):
group = RuleGroup(apply_to=['Italy', 'France', 'Germany'], label='Countries', rules=[None, None])
with self.assertRaises(InvalidRuleGroupException):
group.apply()
group = RuleGroup(apply_to=['Italy', 'France', 'Germany'], label='Countries', rules=[(TypeRule, 1)])
with self.assertRaises(InvalidRuleGroupException):
group.apply()
group = RuleGroup(apply_to=['Italy', 'France', 'Germany'], label='Countries', rules=[[TypeRule]])
with self.assertRaises(InvalidRuleGroupException):
group.apply()
def test_group_returns_true_if_respected(self):
rules = [
(TypeRule, {'valid_type': list}),
(MinLengthRule, {'min_length': 1}),
UniqueItemsRule
]
group = RuleGroup(apply_to=['Italy', 'France', 'Germany'], label='Countries', rules=rules)
self.assertTrue(group.apply())
def test_group_catch_rules_exception(self):
class BadRule(ValidationRule):
def apply(self) -> bool:
raise FileNotFoundError
rules = [BadRule]
group = RuleGroup(apply_to=['Italy', 'France', 'Germany'], label='Countries', rules=rules)
self.assertFalse(group.apply())
def test_group_supports_lambda_expressions(self):
rules = [
(TypeRule, {'valid_type': list}),
(MinLengthRule, {'min_length': 1}),
UniqueItemsRule
]
group = RuleGroup(lambda: ['Italy', 'France', 'Germany'], label='Countries', rules=rules)
self.assertTrue(group.apply())
def test_group_returns_false_if_not_respected(self):
rules = [
(TypeRule, {'valid_type': list}),
(MinLengthRule, {'min_length': 2}),
UniqueItemsRule
]
# TypeRule test
group_1 = RuleGroup(apply_to='foo', label='Countries', rules=rules)
self.assertFalse(group_1.apply())
# MinLengthRule test
group_2 = RuleGroup(apply_to=['USA'], label='Countries', rules=rules)
self.assertFalse(group_2.apply())
# UniqueItemsRule test
group_3 = RuleGroup(apply_to=['USA', 'Italy', 'USA'], label='Countries', rules=rules)
self.assertFalse(group_3.apply())
def test_group_returns_false_if_given_type_is_wrong(self):
class MyObject:
pass
rules = [
(MinLengthRule, {'min_length': 2}),
UniqueItemsRule
]
group = RuleGroup(lambda: MyObject(), label='Countries', rules=rules)
self.assertFalse(group.apply())
def test_default_message_is_used_if_no_custom_provided(self):
rules = [
(TypeRule, {'valid_type': list}),
(MinLengthRule, {'min_length': 2}),
UniqueItemsRule
]
# TypeRule test
group_1 = RuleGroup(apply_to='foo', label='Countries', rules=rules)
group_1.apply()
self.assertEqual(group_1.get_error_message(), TypeRule.default_error_message)
# MinLengthRule test
group_2 = RuleGroup(apply_to=['USA'], label='Countries', rules=rules)
group_2.apply()
self.assertEqual(group_2.get_error_message(), MinLengthRule.default_error_message)
# UniqueItemsRule test
group_3 = RuleGroup(apply_to=['USA', 'Italy', 'USA'], label='Countries', rules=rules)
group_3.apply()
self.assertEqual(group_3.get_error_message(), UniqueItemsRule.default_error_message)
def test_custom_message_used_if_provided(self):
rules = [
(TypeRule, {'valid_type': list, 'error_message': 'Custom TypeRule message'}),
(MinLengthRule, {'min_length': 2, 'error_message': 'Custom MinLengthRule message'}),
(UniqueItemsRule, {'error_message': 'Custom UniqueItemsRule message'})
]
# TypeRule test
group_1 = RuleGroup(apply_to='foo', label='Countries', rules=rules)
group_1.apply()
self.assertEqual(group_1.get_error_message(), 'Custom TypeRule message')
# MinLengthRule test
group_2 = RuleGroup(apply_to=['USA'], label='Countries', rules=rules)
group_2.apply()
self.assertEqual(group_2.get_error_message(), 'Custom MinLengthRule message')
# UniqueItemsRule test
group_3 = RuleGroup(apply_to=['USA', 'Italy', 'USA'], label='Countries', rules=rules)
group_3.apply()
self.assertEqual(group_3.get_error_message(), 'Custom UniqueItemsRule message')
def test_error_message_fallback_if_no_failed_rule(self):
rules = [
(TypeRule, {'valid_type': list}),
(MinLengthRule, {'min_length': 1}),
UniqueItemsRule
]
group = RuleGroup(apply_to=['Italy', 'France', 'Germany'], label='Countries', rules=rules)
self.assertTrue(group.apply())
self.assertEqual(group.get_error_message(), RuleGroup.default_error_message)
# bitwise operators
def test_group_can_be_negated_with_bitwise_inversion(self):
rules = [
(TypeRule, {'valid_type': list}),
(MinLengthRule, {'min_length': 2}),
UniqueItemsRule
]
# TypeRule test
group_1 = ~ RuleGroup(apply_to='foo', label='Countries', rules=rules)
self.assertTrue(group_1.apply())
# MinLengthRule test
group_2 = ~ RuleGroup(apply_to=['USA'], label='Countries', rules=rules)
self.assertTrue(group_2.apply())
# UniqueItemsRule test
group_3 = ~ RuleGroup(apply_to=['USA', 'Italy', 'USA'], label='Countries', rules=rules)
self.assertTrue(group_3.apply())
group_4 = ~ RuleGroup(apply_to=['USA', 'Italy', 'Germany'], label='Countries', rules=rules)
self.assertFalse(group_4.apply())
class FullStringRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(FullStringRule('ciao', 'label').apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(FullStringRule(lambda: 'ciao', 'label').apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(FullStringRule('', 'label').apply())
self.assertFalse(FullStringRule(' \n\n ', 'label').apply())
def test_rule_returns_false_if_given_type_is_wrong(self):
self.assertFalse(FullStringRule(None, 'label').apply())
self.assertFalse(FullStringRule([1, 2, 3], 'label').apply())
self.assertFalse(FullStringRule(datetime.now(), 'label').apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = FullStringRule('ciao', 'label')
self.assertEqual(rule.get_error_message(), FullStringRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = FullStringRule('ciao', 'label', CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because the string has content
negated_rule = ~ FullStringRule('ciao', 'label')
self.assertFalse(negated_rule.apply())
# since negated, pass because the string is empty
negated_rule = ~ FullStringRule('', 'label')
self.assertTrue(negated_rule.apply())
class ChoiceRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(ChoiceRule('B', 'label', choices=('A', 'B', 'C')).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(ChoiceRule(lambda: 'B', 'label', choices=('A', 'B', 'C')).apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(ChoiceRule('D', 'label', choices=('A', 'B', 'C')).apply())
def test_rule_returns_false_if_given_type_is_wrong(self):
self.assertFalse(ChoiceRule({'a': 1}, 'label', choices=('A', 'B', 'C')).apply())
self.assertFalse(ChoiceRule(42, 'label', choices=('A', 'B', 'C')).apply())
self.assertFalse(ChoiceRule(True, 'label', choices=('A', 'B', 'C')).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = ChoiceRule('B', 'label', choices=('A', 'B', 'C'))
self.assertEqual(rule.get_error_message(), ChoiceRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = ChoiceRule('B', 'label', choices=('A', 'B', 'C'), error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
def test_rule_catches_exception_in_apply(self):
self.assertFalse(ChoiceRule('x', 'label', choices=False).apply())
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because "B" is in available options:
negated_rule = ~ ChoiceRule('B', 'label', choices=('A', 'B', 'C'))
self.assertFalse(negated_rule.apply())
# since negated, pass because type "Z" is not in available options:
negated_rule_2 = ~ ChoiceRule('Z', 'label', choices=('A', 'B', 'C'))
self.assertTrue(negated_rule_2.apply())
class MinValueRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(MinValueRule(100, 'label', min_value=50).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(MinValueRule(lambda: 100, 'label', min_value=50).apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(MinValueRule(1, 'label', min_value=50).apply())
def test_rules_returns_false_if_the_given_type_is_wrong(self):
self.assertFalse(MinValueRule('ciao', 'label', min_value=50).apply())
self.assertFalse(MinValueRule({'a': 0}, 'label', min_value=50).apply())
self.assertFalse(MinValueRule([1, 2, 3], 'label', min_value=50).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = MinValueRule(100, 'label', min_value=50)
self.assertEqual(rule.get_error_message(), MinValueRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = MinValueRule(100, 'label', min_value=50, error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because 100 is > 50
negated_rule = ~ MinValueRule(100, 'label', min_value=50)
self.assertFalse(negated_rule.apply())
# since negated, pass because 10 is < 50
negated_rule_2 = ~ MinValueRule(10, 'label', min_value=50)
self.assertTrue(negated_rule_2.apply())
# since negated, pass because 50 == 50
negated_rule_3 = ~ MinValueRule(50, 'label', min_value=50)
self.assertFalse(negated_rule_3.apply())
class MaxValueRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(MaxValueRule(10, 'label', max_value=50).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(MaxValueRule(lambda: 10, 'label', max_value=50).apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(MaxValueRule(1000, 'label', max_value=50).apply())
def test_rules_returns_false_if_the_given_type_is_wrong(self):
self.assertFalse(MaxValueRule('hello', 'label', max_value=50).apply())
self.assertFalse(MaxValueRule([1, 2, 3], 'label', max_value=50).apply())
self.assertFalse(MaxValueRule({'a': 'b'}, 'label', max_value=50).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = MaxValueRule(10, 'label', max_value=50)
self.assertEqual(rule.get_error_message(), MaxValueRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = MaxValueRule(10, 'label', max_value=50, error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because 10 is < 50
negated_rule = ~ MaxValueRule(10, 'label', max_value=50)
self.assertFalse(negated_rule.apply())
# since negated, pass because 100 is > 50
negated_rule_2 = ~ MaxValueRule(100, 'label', max_value=50)
self.assertTrue(negated_rule_2.apply())
        # since negated, fails because 50 satisfies max_value (50 == 50)
negated_rule_3 = ~ MaxValueRule(50, 'label', max_value=50)
self.assertFalse(negated_rule_3.apply())
class MinLengthRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(MinLengthRule('hello', 'label', min_length=3).apply())
self.assertTrue(MinLengthRule(['foo', 'bar', 'baz'], 'label', min_length=3).apply())
self.assertTrue(MinLengthRule(('foo', 'bar', 'baz'), 'label', min_length=3).apply())
self.assertTrue(MinLengthRule({'a': 1, 'b': 2, 'c': 3}, 'label', min_length=3).apply())
self.assertTrue(MinLengthRule({'foo', 'bar', 'baz'}, 'label', min_length=3).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(MinLengthRule(lambda: 'hello', 'label', min_length=3).apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(MinLengthRule('hello', 'label', min_length=10).apply())
self.assertFalse(MinLengthRule(['foo', 'bar', 'baz'], 'label', min_length=10).apply())
self.assertFalse(MinLengthRule(('foo', 'bar', 'baz'), 'label', min_length=10).apply())
self.assertFalse(MinLengthRule({'a': 1, 'b': 2, 'c': 3}, 'label', min_length=10).apply())
self.assertFalse(MinLengthRule({'foo', 'bar', 'baz'}, 'label', min_length=10).apply())
def test_rules_returns_false_if_the_given_type_is_wrong(self):
self.assertFalse(MinLengthRule(5, 'label', min_length=10).apply())
self.assertFalse(MinLengthRule(datetime.now(), 'label', min_length=10).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = MinLengthRule('hello', 'label', min_length=10)
self.assertEqual(rule.get_error_message(), MinLengthRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = MinLengthRule('hello', 'label', min_length=10, error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because len('abcde') > 3
negated_rule = ~ MinLengthRule('abcde', 'label', min_length=3)
self.assertFalse(negated_rule.apply())
# since negated, pass because len('a') is < 3
negated_rule_2 = ~ MinLengthRule('a', 'label', min_length=3)
self.assertTrue(negated_rule_2.apply())
        # since negated, fails because the length equals min_length
negated_rule_3 = ~ MinLengthRule('abc', 'label', min_length=3)
self.assertFalse(negated_rule_3.apply())
class MaxLengthRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(MaxLengthRule('abc', 'label', max_length=3).apply())
self.assertTrue(MaxLengthRule(['foo', 'bar', 'baz'], 'label', max_length=3).apply())
self.assertTrue(MaxLengthRule(('foo', 'bar', 'baz'), 'label', max_length=3).apply())
self.assertTrue(MaxLengthRule({'a': 1, 'b': 2, 'c': 3}, 'label', max_length=3).apply())
self.assertTrue(MaxLengthRule({'foo', 'bar', 'baz'}, 'label', max_length=3).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(MaxLengthRule(lambda: 'abc', 'label', max_length=3).apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(MaxLengthRule('abc', 'label', max_length=2).apply())
self.assertFalse(MaxLengthRule(['foo', 'bar', 'baz'], 'label', max_length=2).apply())
self.assertFalse(MaxLengthRule(('foo', 'bar', 'baz'), 'label', max_length=2).apply())
self.assertFalse(MaxLengthRule({'a': 1, 'b': 2, 'c': 3}, 'label', max_length=2).apply())
self.assertFalse(MaxLengthRule({'foo', 'bar', 'baz'}, 'label', max_length=2).apply())
def test_rules_returns_false_if_the_given_type_is_wrong(self):
self.assertFalse(MaxLengthRule(8, 'label', max_length=2).apply())
self.assertFalse(MaxLengthRule(datetime.now(), 'label', max_length=2).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = MaxLengthRule('abc', 'label', max_length=3)
self.assertEqual(rule.get_error_message(), MaxLengthRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = MaxLengthRule('abc', 'label', max_length=3, error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
        # since negated, fails because len('a') <= 3
negated_rule = ~ MaxLengthRule('a', 'label', max_length=3)
self.assertFalse(negated_rule.apply())
# since negated, pass because len('abcde') is > 3
negated_rule_2 = ~ MaxLengthRule('abcde', 'label', max_length=3)
self.assertTrue(negated_rule_2.apply())
        # since negated, fails because the length equals max_length
negated_rule_3 = ~ MaxLengthRule('abc', 'label', max_length=3)
self.assertFalse(negated_rule_3.apply())
class RangeRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(RangeRule(20, 'label', valid_range=range(10, 100)).apply())
self.assertTrue(RangeRule(20, 'label', valid_range=range(100, 1, -1)).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(RangeRule(lambda: 20, 'label', valid_range=range(10, 100)).apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(RangeRule(5, 'label', valid_range=range(10, 100)).apply())
self.assertFalse(RangeRule(200, 'label', valid_range=range(10, 100)).apply())
def test_floats_are_never_in_range(self):
self.assertFalse(RangeRule(11.5, 'label', valid_range=range(10, 100)).apply())
def test_non_numeric_values_are_never_in_range(self):
self.assertFalse(RangeRule('hello', 'label', valid_range=range(10, 100)).apply())
self.assertFalse(RangeRule([1, 2, 3], 'label', valid_range=range(10, 100)).apply())
self.assertFalse(RangeRule(datetime.now(), 'label', valid_range=range(10, 100)).apply())
def test_range_step_is_respected(self):
# with default step of 1, value 22 is in range
self.assertTrue(RangeRule(22, 'label', valid_range=range(10, 100)).apply())
# with step of 5, value 22 should not be considered in range
self.assertFalse(RangeRule(22, 'label', valid_range=range(10, 100, 5)).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = RangeRule(20, 'label', valid_range=range(10, 100))
self.assertEqual(rule.get_error_message(), RangeRule.default_error_message)
def test_custom_message_used_if_provided(self):
msg = 'custom message'
rule = RangeRule(20, 'label', valid_range=range(10, 100), error_message=msg)
self.assertEqual(rule.get_error_message(), msg)
def test_rule_catches_exception_in_apply(self):
self.assertFalse(RangeRule(11.5, 'label', valid_range=False).apply())
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because 22 is in range
negated_rule = ~ RangeRule(22, 'label', valid_range=range(10, 100))
self.assertFalse(negated_rule.apply())
# since negated, pass because 500 is not in range
negated_rule_2 = ~ RangeRule(500, 'label', valid_range=range(10, 100))
self.assertTrue(negated_rule_2.apply())
class IntervalRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(IntervalRule(25, interval_from=10, interval_to=50, label='label').apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(IntervalRule(lambda: 25, interval_from=10, interval_to=50, label='label').apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(IntervalRule(9, interval_from=10, interval_to=50, label='label').apply())
self.assertFalse(IntervalRule(51, interval_from=10, interval_to=50, label='label').apply())
self.assertFalse(IntervalRule('hello', interval_from=10, interval_to=50, label='label').apply())
self.assertFalse(IntervalRule([1, 2, 3], interval_from=10, interval_to=50, label='label').apply())
def test_rules_returns_false_if_the_given_type_is_wrong(self):
self.assertFalse(IntervalRule(datetime.now(), interval_from=10, interval_to=50, label='label').apply())
self.assertFalse(IntervalRule({'a': 123}, interval_from=10, interval_to=50, label='label').apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = IntervalRule(9, interval_from=10, interval_to=50, label='label')
self.assertEqual(rule.get_error_message(), IntervalRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = IntervalRule(9, interval_from=10, interval_to=50, label='label', error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because 25 is in the interval
negated_rule = ~ IntervalRule(25, interval_from=10, interval_to=50, label='label')
self.assertFalse(negated_rule.apply())
# since negated, pass because 200 is not in the interval
negated_rule = ~ IntervalRule(200, interval_from=10, interval_to=50, label='label')
self.assertTrue(negated_rule.apply())
class PatternRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(PatternRule('hello', 'label', pattern=r'^[a-z]+$').apply())
self.assertTrue(PatternRule('HELLO', 'label', pattern=r'^[a-z]+$', flags=re.IGNORECASE).apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(PatternRule(lambda: 'hello', 'label', pattern=r'^[a-z]+$').apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(PatternRule('HELLO', 'label', pattern=r'^[a-z]+$').apply())
self.assertFalse(PatternRule('599.99', 'label', pattern=r'^[a-z]+$').apply())
self.assertFalse(PatternRule('', 'label', pattern=r'^[a-z]+$').apply())
def test_rule_returns_false_if_given_type_is_wrong(self):
self.assertFalse(PatternRule(42, 'label', pattern=r'^[a-z]+$').apply())
self.assertFalse(PatternRule([1, 2, 3], 'label', pattern=r'^[a-z]+$').apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = PatternRule('hello', 'label', pattern=r'[a-z]+')
self.assertEqual(rule.get_error_message(), PatternRule.default_error_message)
def test_custom_message_used_if_provided(self):
msg = 'custom message'
rule = PatternRule('hello', 'label', pattern=r'[a-z]+', error_message=msg)
self.assertEqual(rule.get_error_message(), msg)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because pattern is matched
negated_rule = ~ PatternRule('hello', 'label', pattern=r'^[a-z]+$')
self.assertFalse(negated_rule.apply())
# since negated, pass because pattern is not matched
negated_rule_2 = ~ PatternRule('213', 'label', pattern=r'^[a-z]+$')
self.assertTrue(negated_rule_2.apply())
class PastDateRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(PastDateRule(datetime(2015, 1, 1), 'date', reference_date=datetime(2020, 1, 1)).apply())
def test_rule_supports_lambda_expressions(self):
rule = PastDateRule(lambda: datetime(2015, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertTrue(rule.apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(PastDateRule(datetime(2022, 1, 1), 'date', reference_date=datetime(2020, 1, 1)).apply())
def test_rule_returns_false_if_given_type_is_wrong(self):
self.assertFalse(PastDateRule('nope!', 'date', reference_date=datetime(2020, 1, 1)).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = PastDateRule(datetime(2015, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertEqual(rule.get_error_message(), PastDateRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = PastDateRule(datetime(2015, 1, 1),
'date',
reference_date=datetime(2020, 1, 1),
error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
def test_rule_catches_exceptions_in_apply(self):
self.assertFalse(PastDateRule(datetime(2022, 1, 1), 'date', reference_date=True).apply())
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fail because date is in the past
negated_rule = ~ PastDateRule(datetime(2015, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertFalse(negated_rule.apply())
# since negated, pass because date is not in the past
negated_rule_2 = ~ PastDateRule(datetime(2030, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertTrue(negated_rule_2.apply())
class FutureDateRuleTest(TestCase):
def test_rule_returns_true_if_respected(self):
self.assertTrue(FutureDateRule(datetime(2015, 1, 1), 'date', reference_date=datetime(2010, 1, 1)).apply())
def test_rule_supports_lambda_expressions(self):
rule = FutureDateRule(lambda: datetime(2015, 1, 1), 'date', reference_date=datetime(2010, 1, 1))
self.assertTrue(rule.apply())
def test_rule_returns_false_if_not_respected(self):
self.assertFalse(FutureDateRule(datetime(2000, 1, 1), 'date', reference_date=datetime(2020, 1, 1)).apply())
def test_rule_returns_false_if_given_type_is_wrong(self):
self.assertFalse(FutureDateRule('nope!', 'date', reference_date=datetime(2020, 1, 1)).apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = FutureDateRule(datetime(2015, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertEqual(rule.get_error_message(), FutureDateRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = FutureDateRule(datetime(2015, 1, 1),
'date',
reference_date=datetime(2020, 1, 1),
error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
def test_rule_catches_exceptions_in_apply(self):
self.assertFalse(FutureDateRule(datetime(2022, 1, 1), 'date', reference_date=True).apply())
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fail because date is in the future
negated_rule = ~ FutureDateRule(datetime(2055, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertFalse(negated_rule.apply())
# since negated, pass because date is not in the future
negated_rule_2 = ~ FutureDateRule(datetime(1999, 1, 1), 'date', reference_date=datetime(2020, 1, 1))
self.assertTrue(negated_rule_2.apply())
class UniqueItemsRuleTest(TestCase):
def test_rule_returns_always_true_for_sets(self):
self.assertTrue(UniqueItemsRule({'one', 'two', 'three'}, 'set_test').apply())
self.assertTrue(UniqueItemsRule({1, 1, 1, 1}, 'set_test').apply())
self.assertTrue(UniqueItemsRule(set(), 'list').apply())
def test_rule_returns_true_if_respected_with_lists(self):
self.assertTrue(UniqueItemsRule(['one', 'two', 'three'], 'list_test').apply())
def test_rule_returns_false_if_not_respected_with_lists(self):
self.assertFalse(UniqueItemsRule(['one', 'two', 'three', 'one'], 'list_test').apply())
def test_rule_returns_true_if_respected_with_tuples(self):
self.assertTrue(UniqueItemsRule(('one', 'two', 'three'), 'tuple_test').apply())
def test_rule_returns_false_if_not_respected_with_tuples(self):
self.assertFalse(UniqueItemsRule(('one', 'one', 'two', 'three'), 'tuple_test').apply())
def test_rule_returns_true_if_respected_with_strings(self):
self.assertTrue(UniqueItemsRule('ABCDE', 'string_test').apply())
def test_rule_returns_false_if_not_respected_with_strings(self):
self.assertFalse(UniqueItemsRule('ABCDEA', 'string_test').apply())
def test_rule_returns_true_if_respected_with_dictionaries(self):
self.assertTrue(UniqueItemsRule({'a': 1}, 'dict_test').apply())
self.assertTrue(UniqueItemsRule({'a': 1, 'b': 2}, 'dict_test').apply())
complex_data = {
'a': {
'x': 1,
'y': [1, 2, 3]
},
'b': {
'x': 1,
'y': [1, 2, 3, 4]
}
}
self.assertTrue(UniqueItemsRule(complex_data, 'dict_test').apply())
def test_rule_returns_false_if_not_respected_with_dictionaries(self):
self.assertFalse(UniqueItemsRule({'a': 1, 'b': 1}, 'dict_test').apply())
complex_data = {
'a': {
'x': 1,
'y': [1, 2, 3]
},
'b': {
'x': 1,
'y': [1, 2, 3]
}
}
self.assertFalse(UniqueItemsRule(complex_data, 'dict_test').apply())
def test_rule_supports_lambda_expressions(self):
self.assertTrue(UniqueItemsRule(lambda: ['one', 'two', 'three'], 'list').apply())
def test_rule_returns_false_if_given_type_is_wrong(self):
self.assertFalse(UniqueItemsRule(42, 'list').apply())
self.assertFalse(UniqueItemsRule(True, 'list').apply())
self.assertFalse(UniqueItemsRule(datetime.now(), 'list').apply())
def test_default_message_is_used_if_no_custom_provided(self):
rule = UniqueItemsRule(['one', 'two', 'three'], 'list')
self.assertEqual(rule.get_error_message(), UniqueItemsRule.default_error_message)
def test_custom_message_used_if_provided(self):
rule = UniqueItemsRule(['one', 'two', 'three'], 'list', error_message=CUSTOM_MESSAGE)
self.assertEqual(rule.get_error_message(), CUSTOM_MESSAGE)
# bitwise operators
def test_rule_can_be_negated_with_bitwise_inversion(self):
# since negated, fails because the list does not contain duplicated items
negated_rule = ~ UniqueItemsRule(['one', 'two', 'three'], 'list_test')
self.assertFalse(negated_rule.apply())
# since negated, pass because the list contains duplicated items
negated_rule = ~ UniqueItemsRule(['one', 'two', 'three', 'one'], 'list_test')
self.assertTrue(negated_rule.apply())
if __name__ == '__main__':
run_tests(verbosity=2)
| mit | 6,725,053,460,130,782,000 | 42.683521 | 118 | 0.624791 | false |
anirudhSK/chromium | build/android/gyp/util/build_device.py | 1 | 2886 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A simple device interface for build steps.
"""
import logging
import os
import re
import sys
import build_utils
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import android_commands
from pylib.android_commands import GetAttachedDevices
class BuildDevice(object):
def __init__(self, configuration):
self.id = configuration['id']
self.description = configuration['description']
self.install_metadata = configuration['install_metadata']
self.adb = android_commands.AndroidCommands(self.id)
def RunShellCommand(self, *args, **kwargs):
return self.adb.RunShellCommand(*args, **kwargs)
def PushIfNeeded(self, *args, **kwargs):
return self.adb.PushIfNeeded(*args, **kwargs)
def GetSerialNumber(self):
return self.id
def Install(self, *args, **kwargs):
return self.adb.Install(*args, **kwargs)
def GetInstallMetadata(self, apk_package):
"""Gets the metadata on the device for the apk_package apk."""
# Matches lines like:
# -rw-r--r-- system system 7376582 2013-04-19 16:34 \
# org.chromium.chrome.shell.apk
# -rw-r--r-- system system 7376582 2013-04-19 16:34 \
# org.chromium.chrome.shell-1.apk
    apk_matcher = lambda s: re.match(r'.*%s(-[0-9]*)?\.apk$' % apk_package, s)
matches = filter(apk_matcher, self.install_metadata)
return matches[0] if matches else None
def GetConfigurationForDevice(id):
adb = android_commands.AndroidCommands(id)
configuration = None
has_root = False
is_online = adb.IsOnline()
if is_online:
cmd = 'ls -l /data/app; getprop ro.build.description'
cmd_output = adb.RunShellCommand(cmd)
has_root = not 'Permission denied' in cmd_output[0]
if not has_root:
# Disable warning log messages from EnableAdbRoot()
logging.getLogger().disabled = True
has_root = adb.EnableAdbRoot()
logging.getLogger().disabled = False
cmd_output = adb.RunShellCommand(cmd)
configuration = {
'id': id,
'description': cmd_output[-1],
'install_metadata': cmd_output[:-1],
}
return configuration, is_online, has_root
def WriteConfigurations(configurations, path):
# Currently we only support installing to the first device.
build_utils.WriteJson(configurations[:1], path, only_if_changed=True)
def ReadConfigurations(path):
return build_utils.ReadJson(path)
def GetBuildDevice(configurations):
assert len(configurations) == 1
return BuildDevice(configurations[0])
def GetBuildDeviceFromPath(path):
configurations = ReadConfigurations(path)
if len(configurations) > 0:
return GetBuildDevice(ReadConfigurations(path))
return None
| bsd-3-clause | -1,155,260,435,384,344,600 | 28.44898 | 76 | 0.700277 | false |
nhmc/xastropy | xastropy/spec/continuum.py | 1 | 2939 | """
#;+
#; NAME:
#; continuum
#; Version 1.0
#;
#; PURPOSE:
#; Module for continuum code
#; 20-Aug-2015 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, imp
import astropy as apy
from astropy import units as u
from astropy import constants as const
from astropy.io import fits, ascii
from linetools.spectra.xspectrum1d import XSpectrum1D
from xastropy.xutils import xdebug as xdb
xa_path = imp.find_module('xastropy')[1]
def init_conti_dict(Norm=0., tilt=0., piv_wv=0., igm='None'):
'''Initialize a continuum conti_dict
Parameters:
----------
Norm: float, optional
      Normalization
tilt: float, optional
Power-law tilt to continuum
piv_wv: float, optional
      Pivot wavelength for tilt. Best kept *without* units
igm: str, optional
Adopt average IGM model? ['None']
Returns:
---------
conti_dict: dict
Useful for simple modeling. Keep as a dict for JSON writing
'''
conti_dict = dict(Norm=Norm, tilt=tilt, piv_wv=piv_wv, igm=igm)
#
return conti_dict
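# Hedged usage sketch (the call below is an illustration added here, not part of
# the original module); it shows why the model is kept as a plain dict:
# >>> conti = init_conti_dict(Norm=1., tilt=-1.5, piv_wv=1450.)
# >>> import json
# >>> json.dumps(conti)  # serializes directly because conti is an ordinary dict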
def get_telfer_spec(zqso=0., igm=False):
'''Generate a Telfer QSO composite spectrum
    Parameters:
----------
zqso: float, optional
Redshift of the QSO
igm: bool, optional
Include IGM opacity? [False]
Returns:
--------
telfer_spec: XSpectrum1D
Spectrum
'''
# Read
telfer = ascii.read(
xa_path+'/data/quasar/telfer_hst_comp01_rq.ascii', comment='#')
scale = telfer['flux'][(telfer['wrest'] == 1450.)]
telfer_spec = XSpectrum1D.from_tuple((telfer['wrest']*(1+zqso),
telfer['flux']/scale[0])) # Observer frame
# IGM?
if igm is True:
'''The following is quite experimental.
Use at your own risk.
'''
import multiprocessing
from xastropy.igm.fN import model as xifm
from xastropy.igm import tau_eff as xit
fN_model = xifm.default_model()
# Expanding range of zmnx (risky)
fN_model.zmnx = (0.,5.)
# Parallel
igm_wv = np.where(telfer['wrest']<1220.)[0]
adict = []
for wrest in telfer_spec.dispersion[igm_wv].value:
tdict = dict(ilambda=wrest, zem=zqso, fN_model=fN_model)
adict.append(tdict)
# Run
#xdb.set_trace()
pool = multiprocessing.Pool(4) # initialize thread pool N threads
ateff = pool.map(xit.map_etl, adict)
# Apply
telfer_spec.flux[igm_wv] *= np.exp(-1.*np.array(ateff))
# Return
return telfer_spec
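# Hedged example call (the redshift below is an arbitrary illustration):
# >>> spec = get_telfer_spec(zqso=2.5)                # plain composite
# >>> spec_igm = get_telfer_spec(zqso=2.5, igm=True)  # adds experimental IGM opacity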
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
flg_tst = 0
flg_tst += 2**0 # Simple Telfer
#if (flg_fig % 2**4) >= 2**3:
| bsd-3-clause | 7,977,228,251,095,165,000 | 25.963303 | 82 | 0.560735 | false |
CloudNcodeInc/djmail | djmail/south_migrations/0001_initial.py | 1 | 3184 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Message'
db.create_table('djmail_message', (
('uuid', self.gf('django.db.models.fields.CharField')(default='e4fca98e-8cff-11e4-92de-70188bfc3fc1', max_length=40, primary_key=True)),
('from_email', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('to_email', self.gf('django.db.models.fields.TextField')(blank=True)),
('body_text', self.gf('django.db.models.fields.TextField')(blank=True)),
('body_html', self.gf('django.db.models.fields.TextField')(blank=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('data', self.gf('django.db.models.fields.TextField')(blank=True)),
('retry_count', self.gf('django.db.models.fields.SmallIntegerField')(default=-1)),
('status', self.gf('django.db.models.fields.SmallIntegerField')(default=10)),
('priority', self.gf('django.db.models.fields.SmallIntegerField')(default=50)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('sent_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
('exception', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('djmail', ['Message'])
def backwards(self, orm):
# Deleting model 'Message'
db.delete_table('djmail_message')
models = {
'djmail.message': {
'Meta': {'ordering': "[u'created_at']", 'object_name': 'Message'},
'body_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'exception': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'from_email': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'priority': ('django.db.models.fields.SmallIntegerField', [], {'default': '50'}),
'retry_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '-1'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'to_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'e4fd26de-8cff-11e4-92de-70188bfc3fc1'", 'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['djmail']
| bsd-3-clause | -8,891,581,562,814,985,000 | 59.075472 | 159 | 0.592023 | false |
azyobuzin/TbrFeed | tbrfeed/feedgen.py | 1 | 1888 | import datetime
import re
import xml.sax.saxutils
import flask
import tumblr
def img_tag_from_photo(photo):
alt_sizes = photo["alt_sizes"]
alt_size = None
for size in alt_sizes:
if size["width"] > 420 and size["width"] <= 500:
alt_size = size
if not alt_size:
alt_size = alt_sizes[0]
return """<img src=%s width="%s" height="%s" />""" \
% (xml.sax.saxutils.quoteattr(alt_size["url"]), alt_size["width"], alt_size["height"])
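# Illustrative sketch (assumes a Tumblr-style photo dict; not from the original
# module). The loop above prefers an alternative size up to 500 px wide:
# >>> photo = {"alt_sizes": [{"url": "http://example.com/p.jpg",
# ...                         "width": 500, "height": 300}]}
# >>> img_tag_from_photo(photo)
# '<img src="http://example.com/p.jpg" width="500" height="300" />'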
def create_title(post):
title = post.get("title")
if title:
return post["title"]
title = post["type"].capitalize()
content = post.get("body")
if not content: content = post.get("caption")
if not content: content = post.get("text")
if not content: content = post.get("description")
if not content: content = post.get("question")
if content:
title += ": " + xml.sax.saxutils.unescape(re.sub(r"(\<.*?\>|[\r\n])", "", content))
if len(title) > 60:
title = title[:60-3] + "..."
return title
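# Hedged example of the fallback title built above (hypothetical post dict):
# >>> create_title({"type": "photo", "caption": "<p>Sunset</p>"})
# 'Photo: Sunset'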
def create_description(post):
return flask.render_template("feed_%s.html" % (post["type"],), post=post, img_tag_from_photo=img_tag_from_photo)
def format_date(date_str):
return tumblr.parse_date(date_str).strftime("%a, %d %b %Y %H:%M:%S GMT")
def generate_rss(uri, username, type, posts):
return flask.render_template("rss.xml",
uri=uri,
username=username,
type=type,
posts=posts,
create_title=create_title,
create_description=create_description,
parse_date=tumblr.parse_date)
def generate_atom(uri, username, type, posts):
return flask.render_template("atom.xml",
uri=uri,
username=username,
type=type,
posts=posts,
create_title=create_title,
create_description=create_description,
parse_date=tumblr.parse_date)
| mit | 6,560,032,934,799,633,000 | 28.046154 | 116 | 0.60911 | false |
RyanSkraba/beam | sdks/python/apache_beam/examples/snippets/transforms/aggregation/count_test.py | 1 | 2185 | # coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from apache_beam.examples.snippets.util import assert_matches_stdout
from apache_beam.testing.test_pipeline import TestPipeline
from . import count
def check_total_elements(actual):
expected = '''[START total_elements]
10
[END total_elements]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_total_elements_per_key(actual):
expected = '''[START total_elements_per_key]
('spring', 4)
('summer', 3)
('fall', 2)
('winter', 1)
[END total_elements_per_key]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
def check_total_unique_elements(actual):
expected = '''[START total_unique_elements]
('🍓', 1)
('🥕', 3)
('🍆', 2)
('🍅', 3)
('🌽', 1)
[END total_unique_elements]'''.splitlines()[1:-1]
assert_matches_stdout(actual, expected)
@mock.patch('apache_beam.Pipeline', TestPipeline)
@mock.patch(
'apache_beam.examples.snippets.transforms.aggregation.count.print', str)
class CountTest(unittest.TestCase):
def test_count_globally(self):
count.count_globally(check_total_elements)
def test_count_per_key(self):
count.count_per_key(check_total_elements_per_key)
def test_count_per_element(self):
count.count_per_element(check_total_unique_elements)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,968,497,958,140,045,000 | 27.933333 | 76 | 0.731797 | false |
RedHatInsights/insights-core | insights/parsers/avc_cache_threshold.py | 1 | 1082 | """
AvcCacheThreshold - File ``/sys/fs/selinux/avc/cache_threshold``
================================================================
This parser reads the content of ``/sys/fs/selinux/avc/cache_threshold``.
"""
from .. import parser, CommandParser
from ..parsers import ParseException
from insights.specs import Specs
@parser(Specs.avc_cache_threshold)
class AvcCacheThreshold(CommandParser):
"""
    Class ``AvcCacheThreshold`` parses the content of ``/sys/fs/selinux/avc/cache_threshold``.
    Attributes:
        cache_threshold (int): The value of the AVC cache threshold.
A typical sample of the content of this file looks like::
512
Examples:
>>> type(avc_cache_threshold)
<class 'insights.parsers.avc_cache_threshold.AvcCacheThreshold'>
>>> avc_cache_threshold.cache_threshold
512
"""
def parse_content(self, content):
if len(content) != 1:
raise ParseException("Error: ", content[0] if content else 'empty file')
self.cache_threshold = int(content[0].strip())
| apache-2.0 | -5,647,076,919,696,862,000 | 29.055556 | 98 | 0.634011 | false |
MartinPaulo/resplat | storage/report_unfunded.py | 1 | 2761 | from decimal import *
from operator import attrgetter
from storage.models import Collection
from storage.report_funding import ReportRow, FundingReportRow, \
FundingReportForCollection, AbstractFundingReportBase
class Bunch(list):
"""
http://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/
"""
def __init__(self, *args, **kwds):
super().__init__()
self[:] = list(args)
setattr(self, '__dict__', kwds)
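    # Hedged illustration (values are arbitrary): Bunch(1, 2, label='x') behaves
    # like the list [1, 2] while also exposing the attribute .label == 'x'.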
class UnfundedReportRow(ReportRow):
def __init__(self, collection, storage_product, unfunded_value):
self.id = collection.id
self.code = collection.application_code
self.name = str(collection.name)
self.product = str(storage_product)
self.value = unfunded_value
def add(self, another_unfunded_row):
self.value = self.value + another_unfunded_row.value
def is_total(self):
return self.product == self.TOTAL_KEY[1]
def __str__(self):
return '(id/name/product/value) = (%s/%s/%s/%s)' % (
self.id, self.name, self.product, self.value)
class UnfundedReportForAllCollections(AbstractFundingReportBase):
GLOBAL_TOTAL_COLLECTION = Bunch(app_id=-1, id=-1,
application_code=FundingReportRow.ALL_TEXT,
name=FundingReportRow.TOTAL_KEY[1])
def __init__(self):
super().__init__()
# noinspection PyTypeChecker
self.total = UnfundedReportRow(self.GLOBAL_TOTAL_COLLECTION,
FundingReportRow.TOTAL_KEY[1],
Decimal(0.0))
self.reportType = self.BY_UNFUNDED_COLLECTION
self.report = self.generate_report()
self.process_done = True
def generate_report(self):
result = []
for collection in Collection.objects.all().order_by('name'):
funding_report = FundingReportForCollection(collection)
for storage_product in funding_report.global_data_dict.keys():
dict_ = funding_report.global_data_dict[storage_product]
unfunded_storage_product = dict_[FundingReportRow.UNFUNDED_KEY]
if unfunded_storage_product and \
unfunded_storage_product.ingested > 0:
new_row = UnfundedReportRow(
collection, storage_product,
unfunded_storage_product.ingested)
result.append(new_row)
self.total.add(new_row)
# Sort rows highest to lowest value
result = sorted(result, key=attrgetter('value'), reverse=True)
result.append(self.total)
return result
| lgpl-3.0 | 5,416,138,525,681,396,000 | 37.347222 | 97 | 0.597247 | false |
polarise/python-bioclasses | setup.py | 1 | 1055 | from distutils.core import setup
setup(
name='BioClasses',
version='0.1.6',
packages=["BioClasses"],
data_files=[("genetic_codes", \
["data/genetic_codes/euplotid_genetic_code.txt", \
"data/genetic_codes/human_genetic_code.txt", \
"data/genetic_codes/tetrahymena_genetic_code.txt"]), \
("CAI_tables", \
["data/CAI_tables/homo_CAI_table.txt", \
"data/CAI_tables/euplotid_CAI_table.txt", \
"data/CAI_tables/tetrahymena_CAI_table.txt"]), \
("transition_matrices", \
["data/transition_matrices/homo_transition_matrix.pic", \
"data/transition_matrices/euplotid_transition_matrix.pic", \
"data/transition_matrices/tetrahymena_transition_matrix.pic"])],
# package_data={"BioClasses": ["data/genetic_codes/*.txt", "data/CAI_tables/*.txt", "data/transition_matrices/*.pic"]},
requires=["pysam", "Biopython", "scipy"],
author="Paul K. Korir",
author_email="[email protected]",
url="http://www.paulkorir.com/projects/BioClasses",
license="LICENSE.txt",
description="Python classes for bioinformatics",
long_description=open( "README.txt" ).read(),
)
| gpl-2.0 | 1,539,110,896,714,187,800 | 38.074074 | 120 | 0.720379 | false |
pablobesada/tw | twfetch.py | 1 | 9251 | #encoding: utf-8
"""
from twitter import Twitter
twitter = Twitter.getTwitter()
q = twitter.search(q='scioli')
print q['search_metadata']
for t in q['statuses']:
print t
"""
#import tweetstream
import time
import threading
from twython import TwythonStreamer, Twython
from pymongo import MongoClient
from datetime import datetime, timedelta
from frontend.rulesmanager import getBrandClassifiers, getTopicClassifiers, getAccountsToTrack
import argparse
from frontend.brandclassifier import ProductMatch
import traceback
from frontend.gnip.pipeline import Pipeline
from frontend.gnip import pipelinestages
parser = argparse.ArgumentParser()
parser.add_argument('--auth', action="store_true", default=False)
parser.add_argument('--host', default='')
args = parser.parse_args()
dbuser = "monitor"
dbpasswd = "monitor678"
if args.host:
mclient = MongoClient(args.host)
else:
mclient = MongoClient()
monitor = mclient['monitor']
if args.auth:
monitor.authenticate(dbuser, dbpasswd)
#db.authenticate("pablo", "1234")
"""
words = ["opera", "firefox", "safari"]
#people = [123,124,125]
locations = ["-122.75,36.8", "-121.75,37.8"]
with tweetstream.FilterStream("pablobesada", "paddle26", track=words, locations=locations) as stream:
for tweet in stream:
print "Got interesting tweet:", tweet
"""
def getWordsToTrack():
accounts = monitor.accounts.find({})
s = set()
for acc in accounts:
for cid, campaign in acc['campaigns'].items():
if not 'active' in campaign or not campaign['active']: continue
for bid, brand in campaign['brands'].items():
s.add(brand['name'])
if brand.get('synonyms','').strip():
for kw in [kw.strip() for kw in brand['synonyms'].split(",") if kw.strip()]:
s.add(kw)
return s
stream = None
def getHashtagsToTrack():
accounts = monitor.accounts.find({})
s = dict()
for acc in accounts:
if not 'polls' in acc: continue
for pid, poll in acc['polls'].items():
hts = set()
if poll.get('poll_hashtag', ''):
hts.add(poll['poll_hashtag'].strip())
else:
for ht in [ht.strip() for ht in poll['hashtags'].split(",") if ht.strip()]:
hts.add(ht)
for ht in hts:
if ht not in s: s[ht] = []
s[ht].append({"aid": str(acc['_id']), "pid": pid})
return s
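# Shape of the mapping built above (illustrative assumption, ids shortened):
#   {"#myhashtag": [{"aid": "5409...", "pid": "p1"}, ...]}
# i.e. each tracked hashtag maps to the account/poll pairs that registered it.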
class TweetStreamer(TwythonStreamer):
TWITTER_ADDRESS = "@TestProdeBr2014"
CONSUMER_KEY = "1qxRMuTzu2I7BP7ozekfRw"
CONSUMER_SECRET = "whQFHN8rqR78L6su6U32G6TPT7e7W2vCouR4inMfM"
ACCESS_TOKEN = "2305874377-TTmvLjLuP8aq8q2bT7GPJsOjG9n6uYLAA0tvsYU"
ACCESS_KEY = "iy4SYpkHK26Zyfr9RhYSGOLVtd9eMNF6Ebl2p552gF4vL"
def __init__(self):
import threading
TwythonStreamer.__init__(self, TweetStreamer.CONSUMER_KEY, TweetStreamer.CONSUMER_SECRET, TweetStreamer.ACCESS_TOKEN, TweetStreamer.ACCESS_KEY)
self.tweets = []
self.stop = False
print "stream created %s" % id(self)
def on_success(self, data):
try:
print "received:", data['text'], id(self)
except:
pass
self.tweets.append(data)
def on_error(self, status_code, data):
try:
print "error:", status_code, data
except:
pass
# Want to stop trying to get data because of the error?
# Uncomment the next line!
# self.disconnect()
def __iter__(self):
return self
def next(self):
print "en next"
while not self.tweets:
if self.stop: raise StopIteration
print "%s, waiting... %s" % (datetime.now(), id(self))
time.sleep(0.5)
t = self.tweets.pop(0)
return t
def finish(self):
print "finishing streamer thread... %s" % id(self)
global stream
print "current stream: %s" % id(stream)
self.disconnect()
self.stop = True
stream = None
print "Streamer thread finished"
class MyThread(threading.Thread):
keywords = []
accountsToTrack = []
accountsToTrackIds = []
hashtagsToTrack = []
running = False
def run(self):
MyThread.running = True
global stream
stream = TweetStreamer()
#k = stream.statuses.filter(follow="138814032,31133330,117185027", track=["CFKArgentina", "cristina", "cris", "kirchner", "scioli", "massa"], language="es")
#kwords = ['unilever', 'dove', 'knorr', 'maizena', 'ala', 'skip', 'lux', 'ades', 'ponds', "pond's", 'rexona', "hellman's", "axe", "cif", "savora", "impulse", "vivere", "suave", "hellen curtis", "lipton" ,"lifebuoy", "drive", "sedal", "comfort", "clear", "vasenol", "vim"] #argentina
#kwords = ['unilever', "ades", "pond's", "ponds", "st. ives", "ives", "knorr", "dove", "axe", "tresemme", u"tresemmé", "sedal", "hellman's", "cif" , "iberia", "rexona", "maizena", "vo5", "clear", "nexxus", "vasenol", "lipton", "not butter", "ben & jerry's", "jerry's", "slim-fast", "slimfast", "del valle", "jumex", 'veet', 'nair', 'america','sivale','sivalesi','crujitos'] #"holanda (helado)", "primavera (margarina)" #mexico
if MyThread.keywords or MyThread.accountsToTrack or MyThread.accountsToTrackIds or MyThread.hashtagsToTrack:
k = stream.statuses.filter(follow=list(MyThread.accountsToTrackIds), track=list(MyThread.keywords) + list(MyThread.accountsToTrack) + list(MyThread.hashtagsToTrack), language="es")
MyThread.running = False
# (follow="138814032", track=["CFKArgentina", "cristina", "cris"])
#(track=['scioli','massa','cfk','solanas','@cfkargentina','@danielscioli','@SergioMassa'])
bcs = None
tcs = None
class KeywordMonitor(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.stop = False
def run(self):
global bcs
global tcs
global stream
t = datetime.now() - timedelta(hours=99)
keywords = None
accountsToTrack = None
        accountsToTrackIds = None  # needed before the first comparison below
        hashtagsToTrack = None
checking_interval=6 #seconds
while not self.stop:
if datetime.now()-t > timedelta(seconds=checking_interval):
print "checking keywords and accounts to track..."
t = datetime.now()
k2 = getWordsToTrack()
a2, i2 = getAccountsToTrack()
h2 = getHashtagsToTrack()
bcs = getBrandClassifiers()
tcs2 = getTopicClassifiers()
if k2 != keywords or a2 != accountsToTrack or i2 != accountsToTrackIds or h2 != hashtagsToTrack or not (tcs2 == tcs):
print "keyword or account changes found... restarting fetcher"
print (tcs2 == tcs)
if stream: stream.finish()
while MyThread.running: time.sleep(1)
keywords = k2
accountsToTrack = a2
accountsToTrackIds = i2
hashtagsToTrack = h2
tcs = tcs2
MyThread.keywords = keywords
MyThread.accountsToTrack = accountsToTrack
MyThread.accountsToTrackIds = accountsToTrackIds
MyThread.hashtagsToTrack = hashtagsToTrack
MyThread().start()
try:
print "Tracking:", keywords
print "Accounts: ", accountsToTrack
print "Hashtags: ", hashtagsToTrack
open("tracking_words.txt", "wb").write(str(keywords))
open("tracking_accounts.txt", "wb").write(str(accountsToTrack))
open("tracking_hashtags.txt", "wb").write(str(hashtagsToTrack))
except:
pass
time.sleep(1)
else:
time.sleep(checking_interval/2)
def finish(self):
print "finishing keyword monitor thread..."
self.stop = True
try:
#t = Twython("1qxRMuTzu2I7BP7ozekfRw", "whQFHN8rqR78L6su6U32G6TPT7e7W2vCouR4inMfM", "2305874377-TTmvLjLuP8aq8q2bT7GPJsOjG9n6uYLAA0tvsYU", "iy4SYpkHK26Zyfr9RhYSGOLVtd9eMNF6Ebl2p552gF4vL")
#user_id = t.lookup_user(screen_name='pablobesada')[0]['id_str']
#print user_id
#exit(0)
bcs = getBrandClassifiers()
tcs = getTopicClassifiers()
kwmonitor = KeywordMonitor()
kwmonitor.start()
pipeline = Pipeline()
for plsc in pipelinestages.getPipelineTwitterStageClasses():
pipeline.appendStage(plsc())
pipeline.startWorking()
while True:
while not stream:
time.sleep(0.2)
for t in stream:
pipeline.getSourceQueue().put(t)
except KeyboardInterrupt, e:
pass
except Exception, e:
print traceback.format_exc()
if kwmonitor: kwmonitor.finish()
if stream: stream.finish()
raise
pass
if kwmonitor: kwmonitor.finish()
if stream: stream.finish()
pipeline.stopWorking()
| apache-2.0 | 3,237,573,120,783,323,000 | 35.561265 | 434 | 0.592 | false |
nash-x/hws | neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py | 1 | 8207 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.mech_sriov import exceptions as exc
LOG = log.getLogger(__name__)
FLAT_VLAN = 0
sriov_opts = [
cfg.ListOpt('supported_pci_vendor_devs',
default=['15b3:1004', '8086:10c9', '8086:1520', '8086:10ed'],
help=_("Supported PCI vendor devices, defined by "
"vendor_id:product_id according to the PCI ID "
"Repository. Default enables support for Intel "
"and Mellanox SR-IOV capable NICs")),
cfg.BoolOpt('agent_required',
default=False,
help=_("SRIOV neutron agent is required for port binding")),
]
cfg.CONF.register_opts(sriov_opts, "ml2_sriov")
class SriovNicSwitchMechanismDriver(api.MechanismDriver):
"""Mechanism Driver for SR-IOV capable NIC based switching.
The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the
    sriovNicSwitch L2 agent, depending on the configuration option.
Port binding with this driver may require the sriovNicSwitch agent
to be running on the port's host, and that agent to have connectivity
to at least one segment of the port's network.
    The L2 agent is not essential for port binding; binding is handled by the
    VIF driver via the libvirt domain XML.
    The L2 agent is present in order to manage port update events.
    If the vendor NIC does not support updates, setting agent_required = False
    allows the mechanism driver to be used without the L2 agent.
"""
def __init__(self,
agent_type=constants.AGENT_TYPE_NIC_SWITCH,
vif_type=portbindings.VIF_TYPE_HW_VEB,
vif_details={portbindings.CAP_PORT_FILTER: False},
supported_vnic_types=[portbindings.VNIC_DIRECT,
portbindings.VNIC_MACVTAP],
supported_pci_vendor_info=None):
"""Initialize base class for SriovNicSwitch L2 agent type.
:param agent_type: Constant identifying agent type in agents_db
:param vif_type: Value for binding:vif_type when bound
:param vif_details: Dictionary with details for VIF driver when bound
:param supported_vnic_types: The binding:vnic_type values we can bind
:param supported_pci_vendor_info: The pci_vendor_info values to bind
"""
self.agent_type = agent_type
self.supported_vnic_types = supported_vnic_types
self.vif_type = vif_type
self.vif_details = vif_details
def initialize(self):
try:
self.pci_vendor_info = self._parse_pci_vendor_config(
cfg.CONF.ml2_sriov.supported_pci_vendor_devs)
self.agent_required = cfg.CONF.ml2_sriov.agent_required
except ValueError:
LOG.exception(_("Failed to parse supported PCI vendor devices"))
raise cfg.Error(_("Parsing supported pci_vendor_devs failed"))
def bind_port(self, context):
LOG.debug("Attempting to bind port %(port)s on "
"network %(network)s",
{'port': context.current['id'],
'network': context.network.current['id']})
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if vnic_type not in self.supported_vnic_types:
LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
vnic_type)
return
if not self._check_supported_pci_vendor_device(context):
LOG.debug("Refusing to bind due to unsupported pci_vendor device")
return
if self.agent_required:
for agent in context.host_agents(self.agent_type):
LOG.debug("Checking agent: %s", agent)
if agent['alive']:
if self.try_to_bind(context, agent):
return
else:
LOG.warning(_("Attempting to bind with dead agent: %s"),
agent)
else:
self.try_to_bind(context)
def try_to_bind(self, context, agent=None):
for segment in context.network.network_segments:
if self.check_segment(segment, agent):
context.set_binding(segment[api.ID],
self.vif_type,
self.get_vif_details(context, segment),
constants.PORT_STATUS_ACTIVE)
LOG.debug("Bound using segment: %s", segment)
return True
return False
def check_segment(self, segment, agent=None):
"""Check if segment can be bound.
:param segment: segment dictionary describing segment to bind
:param agent: agents_db entry describing agent to bind or None
:returns: True if segment can be bound for agent
"""
supported_network_types = (p_const.TYPE_VLAN, p_const.TYPE_FLAT)
network_type = segment[api.NETWORK_TYPE]
if network_type in supported_network_types:
if agent:
mappings = agent['configurations'].get('device_mappings', {})
LOG.debug("Checking segment: %(segment)s "
"for mappings: %(mappings)s ",
{'segment': segment, 'mappings': mappings})
return segment[api.PHYSICAL_NETWORK] in mappings
return True
return False
def _check_supported_pci_vendor_device(self, context):
if self.pci_vendor_info:
profile = context.current.get(portbindings.PROFILE, {})
if not profile:
LOG.debug("Missing profile in port binding")
return False
pci_vendor_info = profile.get('pci_vendor_info')
if not pci_vendor_info:
LOG.debug("Missing pci vendor info in profile")
return False
if pci_vendor_info not in self.pci_vendor_info:
LOG.debug("Unsupported pci_vendor %s", pci_vendor_info)
return False
return True
return False
def get_vif_details(self, context, segment):
network_type = segment[api.NETWORK_TYPE]
if network_type == p_const.TYPE_FLAT:
vlan_id = FLAT_VLAN
elif network_type == p_const.TYPE_VLAN:
vlan_id = segment[api.SEGMENTATION_ID]
else:
raise exc.SriovUnsupportedNetworkType(net_type=network_type)
self.vif_details[portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
return self.vif_details
def _parse_pci_vendor_config(self, pci_vendor_list):
parsed_list = []
for elem in pci_vendor_list:
elem = elem.strip()
if not elem:
continue
split_result = elem.split(':')
if len(split_result) != 2:
raise ValueError(_("Invalid pci_vendor_info: '%s'") % elem)
vendor_id = split_result[0].strip()
if not vendor_id:
raise ValueError(_("Missing vendor_id in: '%s'") % elem)
product_id = split_result[1].strip()
if not product_id:
raise ValueError(_("Missing product_id in: '%s'") % elem)
parsed_list.append(elem)
return parsed_list
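    # Minimal sketch of the parsing above (the ids are this module's defaults in
    # supported_pci_vendor_devs; re-using them here is purely illustrative):
    # >>> drv = SriovNicSwitchMechanismDriver()
    # >>> drv._parse_pci_vendor_config(['15b3:1004', ' 8086:10c9 '])
    # ['15b3:1004', '8086:10c9']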
| apache-2.0 | 8,194,136,619,381,212,000 | 42.194737 | 78 | 0.603509 | false |
imagi-ng/metadata-database | metadata_database/model.py | 1 | 4164 | import metadata_database.type
class Base(metadata_database.database.Model):
__abstract__ = True
id = metadata_database.database.Column(
metadata_database.type.UUID,
primary_key=True
)
class Annotation(Base):
__tablename__ = "annotations"
annotator_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("annotators.id")
)
category_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("categories.id")
)
channel_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("channels.id")
)
class Annotator(Base):
__tablename__ = "annotators"
annotations = metadata_database.database.relationship(
"Annotation",
backref="annotator"
)
token = metadata_database.database.Column(
metadata_database.database.String(255)
)
class Category(Base):
__tablename__ = "categories"
annotations = metadata_database.database.relationship(
"Annotation",
backref="category"
)
description = metadata_database.database.Column(
metadata_database.database.String(255)
)
class Channel(Base):
__tablename__ = "channels"
annotations = metadata_database.database.relationship(
"Annotation",
backref="channel"
)
image_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("images.id")
)
stains = metadata_database.database.relationship(
"Stain",
secondary="stainable",
backref="channel"
)
class Experiment(Base):
__tablename__ = "experiments"
class Image(Base):
__tablename__ = "images"
imageable_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("imageable.id")
)
imageable = metadata_database.database.relationship(
"Imageable",
backref="images"
)
channels = metadata_database.database.relationship(
"Channel",
backref="image"
)
class Imageable(Base):
__tablename__ = "imageable"
type = metadata_database.database.Column(
metadata_database.database.String(36)
)
__mapper_args__ = {
"polymorphic_identity": "imageable",
"polymorphic_on": type
}
class Plate(Base):
__tablename__ = "plates"
experiment_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("experiments.id")
)
channels = metadata_database.database.relationship(
"Well",
backref="plate"
)
class Slide(Imageable):
__tablename__ = "slides"
id = metadata_database.database.Column(
metadata_database.type.UUID,
metadata_database.database.ForeignKey("imageable.id"),
primary_key=True
)
experiment_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("experiments.id")
)
__mapper_args__ = {
"polymorphic_identity": "slides",
}
class Stain(Base):
__tablename__ = "stains"
name = metadata_database.database.Column(
metadata_database.database.String(255)
)
class Stainable(Base):
__tablename__ = "stainable"
channel_id = metadata_database.database.Column(
metadata_database.type.UUID,
metadata_database.database.ForeignKey("channels.id")
)
stain_id = metadata_database.database.Column(
metadata_database.type.UUID,
metadata_database.database.ForeignKey("stains.id")
)
class Well(Imageable):
__tablename__ = "wells"
id = metadata_database.database.Column(
metadata_database.type.UUID,
metadata_database.database.ForeignKey("imageable.id"),
primary_key=True
)
plate_id = metadata_database.database.Column(
metadata_database.database.ForeignKey("plates.id")
)
x = metadata_database.database.Column(
metadata_database.database.String(255)
)
y = metadata_database.database.Column(
metadata_database.database.String(255)
)
__mapper_args__ = {
"polymorphic_identity": "wells",
}
| mit | -3,911,850,472,369,324,000 | 21.508108 | 63 | 0.644813 | false |
vipul-sharma20/fossevents.in | fossevents/events/migrations/0001_initial.py | 1 | 1738 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
import uuid
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('description', models.TextField(verbose_name='description')),
('start_date', models.DateTimeField(verbose_name='start date')),
('end_date', models.DateTimeField(verbose_name='end date', blank=True)),
('homepage', models.URLField(verbose_name='homepage', blank=True)),
('is_published', models.BooleanField(default=False, verbose_name='is published')),
('auth_token', models.CharField(max_length=100)),
('owner_email', models.EmailField(help_text='Email address of the submitter.', max_length=256, verbose_name="owner's email address")),
],
options={
'ordering': ('-start_date',),
'verbose_name': 'event',
'verbose_name_plural': 'events',
},
),
]
| mit | -9,013,716,467,257,346,000 | 45.972973 | 172 | 0.610472 | false |
markuswissinger/ducktestpy | ducktest/base.py | 1 | 1475 | """
Copyright 2016 Markus Wissinger. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod, ABCMeta
from collections import namedtuple
class Processor(object):
__metaclass__ = ABCMeta
def __init__(self):
self.next_processor = None
@abstractmethod
def process(self, *args, **kwargs):
pass
class ChainTerminator(Processor):
def process(self, *args, **kwargs):
pass
def last_after(processor):
while processor.next_processor:
processor = processor.next_processor
return processor
def chain(*processors):
for first, second in zip(processors[:-1], processors[1:]):
last_after(first).next_processor = second
return processors[0]
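# Hedged sketch of how the pieces above compose (Upper is hypothetical, not part
# of ducktest; it only illustrates chain()/last_after()):
# >>> class Upper(Processor):
# ...     def process(self, text):
# ...         self.next_processor.process(text.upper())
# >>> head = chain(Upper(), ChainTerminator())
# >>> head.process("hello")  # "HELLO" is handed to the terminating stage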
PlainTypeWrapper = namedtuple('PlainTypeWrapper', 'own_type')
ContainerTypeWrapper = namedtuple('ContainerTypeWrapper', ['own_type', 'contained_types'])
MappingTypeWrapper = namedtuple('MappingTypeWrapper', ['own_type', 'mapped_types'])
| apache-2.0 | -3,634,547,098,730,059,300 | 27.921569 | 90 | 0.732203 | false |
andrewdodd/drf-timeordered-pagination | tests/conftest.py | 1 | 1512 | def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'}},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'timeordered_pagination',
'tests',
),
REST_FRAMEWORK={
'PAGE_SIZE': 5,
},
)
try:
import django
django.setup()
except AttributeError:
pass
| mit | 8,540,495,660,768,154,000 | 31.170213 | 70 | 0.558201 | false |
omf2097/pyomftools | omftools/pyshadowdive/palette_mapping.py | 1 | 1335 | from validx import Dict, List
from .protos import DataObject
from .palette import Palette
from .utils.parser import BinaryParser
from .utils.validator import UInt8
from .utils.types import Remappings, Remapping
class PaletteMapping(DataObject):
__slots__ = (
"colors",
"remaps",
)
schema = Dict({"colors": Palette.schema, "remaps": List(List(UInt8))})
def __init__(self):
self.colors: Palette = Palette()
self.remaps: Remappings = []
def remap(self, remap_id: int) -> Palette:
return self.colors.remap(self.remaps[remap_id])
def read(self, parser: BinaryParser):
self.colors = Palette().read(parser)
for k in range(0, 19):
remap: Remapping = []
for m in range(0, 256):
remap.append(parser.get_uint8())
self.remaps.append(remap)
return self
def write(self, parser):
self.colors.write(parser)
for k in range(0, 19):
for m in range(0, 256):
parser.put_uint8(self.remaps[k][m])
def serialize(self) -> dict:
return {"colors": self.colors.serialize(), "remaps": self.remaps}
def unserialize(self, data: dict):
self.colors = Palette().unserialize(data["colors"])
self.remaps = data["remaps"]
return self
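# Round-trip sketch (hypothetical data, for illustration only): serialize()
# and unserialize() are inverses over the schema above, so for a populated
# mapping `pm`,
#
#   PaletteMapping().unserialize(pm.serialize()).serialize() == pm.serialize()
#
# and remap(k) returns the base palette re-indexed through remaps[k].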
| mit | -2,354,301,160,322,109,000 | 28.021739 | 74 | 0.6 | false |
thomasgibson/tabula-rasa | HDG_CG_comp/base.py | 1 | 2187 | from firedrake import *
from firedrake.utils import cached_property
from abc import ABCMeta, abstractproperty
class Problem(object):
__metaclass__ = ABCMeta
def __init__(self, N=None, degree=None, dimension=None,
quadrilaterals=False):
super(Problem, self).__init__()
self.degree = degree
self.N = N
self.dim = dimension
self.quads = quadrilaterals
@property
def comm(self):
return self.mesh.comm
@cached_property
def mesh(self):
if self.dim == 2:
return UnitSquareMesh(self.N, self.N, quadrilateral=self.quads)
else:
assert self.dim == 3
if self.quads:
base = UnitSquareMesh(self.N, self.N, quadrilateral=self.quads)
return ExtrudedMesh(base, self.N, layer_height=1.0/self.N)
else:
return UnitCubeMesh(self.N, self.N, self.N)
@abstractproperty
def name(self):
pass
@abstractproperty
def function_space(self):
pass
@abstractproperty
def u(self):
pass
@abstractproperty
def a(self):
pass
@abstractproperty
def L(self):
pass
@abstractproperty
def bcs(self):
pass
@cached_property
def analytic_solution(self):
x = SpatialCoordinate(self.mesh)
if self.dim == 2:
return exp(sin(pi*x[0])*sin(pi*x[1]))
else:
assert self.dim == 3
return exp(sin(pi*x[0])*sin(pi*x[1])*sin(pi*x[2]))
def solver(self, parameters=None):
# For the rebuilding of the Jacobian to record assembly time
problem = LinearVariationalProblem(self.a, self.L, self.u,
bcs=self.bcs,
constant_jacobian=False)
solver = LinearVariationalSolver(problem, solver_parameters=parameters)
return solver
@abstractproperty
def output(self):
pass
@abstractproperty
def err(self):
pass
@abstractproperty
def true_err(self):
pass
@abstractproperty
def sol(self):
pass
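# Illustrative sketch (not from this repository): a concrete subclass fills in
# the abstract pieces, roughly
#
#   class CGPoisson(Problem):
#       name = "CGPoisson-sketch"            # hypothetical name
#
#       @cached_property
#       def function_space(self):
#           return FunctionSpace(self.mesh, "CG", self.degree)
#
#       @cached_property
#       def u(self):
#           return Function(self.function_space)
#
# with `a` and `L` assembled from Trial/TestFunctions on that space, `bcs`
# a DirichletBC on the domain boundary, and the error properties comparing
# `u` against `analytic_solution`.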
| mit | 3,198,064,129,734,001,000 | 22.516129 | 79 | 0.564243 | false |
dhmodi/virtual_patient_assistant/cognitiveSQL/LangConfig.py | 1 | 4537 | # -*- coding: utf-8 -*-
import sys, re
import unicodedata
#reload(sys)
#sys.setdefaultencoding("utf-8")
class LangConfig:
def __init__(self):
self.avg_keywords = []
self.sum_keywords = []
self.max_keywords = []
self.min_keywords = []
self.count_keywords = []
self.junction_keywords = []
self.disjunction_keywords = []
self.greater_keywords = []
self.less_keywords = []
self.between_keywords = []
self.order_by_keywords = []
self.group_by_keywords = []
self.negation_keywords = []
def get_avg_keywords(self):
return self.avg_keywords
def get_sum_keywords(self):
return self.sum_keywords
def get_max_keywords(self):
return self.max_keywords
def get_min_keywords(self):
return self.min_keywords
def get_count_keywords(self):
return self.count_keywords
def get_junction_keywords(self):
return self.junction_keywords
def get_disjunction_keywords(self):
return self.disjunction_keywords
def get_greater_keywords(self):
return self.greater_keywords
def get_less_keywords(self):
return self.less_keywords
def get_between_keywords(self):
return self.between_keywords
def get_order_by_keywords(self):
return self.order_by_keywords
def get_group_by_keywords(self):
return self.group_by_keywords
def get_negation_keywords(self):
return self.negation_keywords
def remove_accents(self, string):
nkfd_form = unicodedata.normalize('NFKD', string)
return u"".join([c for c in nkfd_form if not unicodedata.combining(c)])
def load(self, path):
with open(path) as f:
content = f.readlines()
self.avg_keywords = list(map(self.remove_accents, map(str.strip, content[0].replace(':',',').split(","))))
self.avg_keywords = self.avg_keywords[1:len(list(self.avg_keywords))]
self.sum_keywords = list(map(self.remove_accents, map(str.strip, content[1].replace(':',',').split(","))))
self.sum_keywords = self.sum_keywords[1:len(list(self.sum_keywords))]
self.max_keywords = list(map(self.remove_accents, map(str.strip, content[2].replace(':',',').split(","))))
self.max_keywords = self.max_keywords[1:len(list(self.max_keywords))]
self.min_keywords = list(map(self.remove_accents, map(str.strip, content[3].replace(':',',').split(","))))
self.min_keywords = self.min_keywords[1:len(list(self.min_keywords))]
self.count_keywords = list(map(self.remove_accents, map(str.strip, content[4].replace(':',',').split(","))))
self.count_keywords = self.count_keywords[1:len(list(self.count_keywords))]
self.junction_keywords = list(map(self.remove_accents, map(str.strip, content[5].replace(':',',').split(","))))
self.junction_keywords = self.junction_keywords[1:len(list(self.junction_keywords))]
self.disjunction_keywords = list(map(self.remove_accents, map(str.strip, content[6].replace(':',',').split(","))))
self.disjunction_keywords = self.disjunction_keywords[1:len(list(self.disjunction_keywords))]
self.greater_keywords = list(map(self.remove_accents, map(str.strip, content[7].replace(':',',').split(","))))
self.greater_keywords = self.greater_keywords[1:len(list(self.greater_keywords))]
self.less_keywords = list(map(self.remove_accents, map(str.strip, content[8].replace(':',',').split(","))))
self.less_keywords = self.less_keywords[1:len(list(self.less_keywords))]
self.between_keywords = list(map(self.remove_accents, map(str.strip, content[9].replace(':',',').split(","))))
self.between_keywords = self.between_keywords[1:len(list(self.between_keywords))]
self.order_by_keywords = list(map(self.remove_accents, map(str.strip, content[10].replace(':',',').split(","))))
self.order_by_keywords = self.order_by_keywords[1:len(list(self.order_by_keywords))]
self.group_by_keywords = list(map(self.remove_accents, map(str.strip, content[11].replace(':',',').split(","))))
self.group_by_keywords = self.group_by_keywords[1:len(list(self.group_by_keywords))]
self.negation_keywords = list(map(self.remove_accents, map(str.strip, content[12].replace(':',',').split(","))))
self.negation_keywords = self.negation_keywords[1:len(list(self.negation_keywords))]
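    # Illustrative keyword-file format (an assumption inferred from the
    # parsing in load(), not taken from the repository): thirteen lines of
    # "<label>: <keyword>, <keyword>, ...", in the order read above, e.g.
    #
    #   average: average, mean
    #   sum: sum, total
    #   maximum: maximum, most
    #   ...
    #   negation: not, no, never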
def print_me(self):
print (self.avg_keywords)
print (self.sum_keywords)
print (self.max_keywords)
print (self.min_keywords)
print (self.count_keywords)
print (self.junction_keywords)
print (self.disjunction_keywords)
print (self.greater_keywords)
print (self.less_keywords)
print (self.between_keywords)
print (self.order_by_keywords)
print (self.group_by_keywords)
print (self.negation_keywords)
| apache-2.0 | 5,558,215,102,798,540,000 | 39.159292 | 117 | 0.699581 | false |
James-cB/openPoke | vs2012/openPoke_1/openPoke_1/Threads_def.py | 1 | 5087 |
import time


def programStart():
start = raw_input(" Would you like to begin the simulation? \n\n ")
if start in {"Yes", "yes", "Yep", "yep", "Begin", "begin", "Start", "start", "y", "Y"}:
time.sleep(0.3)
print ""
time.sleep(0.2)
print ""
time.sleep(0.3)
print ""
time.sleep(0.3)
print ""
time.sleep(0.3)
print ""
PokemonSelect()
elif start in {"No", "no", "Never", "never", "Stop", "stop", "Quit", "quit", "Exit", "exit"}:
print "....Well fine, be that way....I didn't want to play with you anywawy."
time.sleep(0.3)
cl_restart()
#quit()
elif start in {"Debug", "debug", "Admin", "admin"}:
admin_pw = raw_input ("Please enter Administrator password. ")
if admin_pw == 'gaben1337':
print ("Administrator Mode has been enabled. ")
time.sleep(0.7)
cls()
execfile ("Admin.py")
else:
print ("Error: Incorrect password.")
print ("Please try again later.")
time.sleep(2)
cls()
execfile ("Frontend.py")
elif start in { "no fak u", "No fak u", "NO FAK U", "no fuck you", "No fuck you", "NO FUCK YOU"}:
print "Fuck off Jesse. Seriously, nobody likes you."
time.sleep(0.9)
cls()
time.sleep(0.5)
print "Except brandon. ;)"
time.sleep(0.9)
cl_restart()
else:
print ("ERROR_01_UNHANDLED_RESPONSE")
print ("")
print ("Whoops. Let's try again.")
print ("")
cl_restart()
def cls(): print ("\n" * 100)
def clear(): print ("\n" * 100)
def cl_restart():
print ("")
print ("")
print ("Client restart has been called.")
time.sleep(0.8)
print (".")
time.sleep(0.8)
print ("..")
time.sleep(0.8)
print ("...")
time.sleep(0.8)
print ("....")
time.sleep(0.8)
print (".....")
time.sleep(1.9)
cls()
execfile ("Frontend.py")
def log_on():
#f = open('log.txt', 'w')
#original = sys.stdout
#sys.stdout = Tee(sys.stdout, f)
print ("This command is currently broken.") # This will go to stdout and the file out.txt
def log_off():
#sys.stdout = original
print ("This command is currently broken." ) # Only on stdout
#f.close()
def poke_battle_mode():
#Sets whether the battle mode is "openPoke" style, "classic" GenI style, or "Modern" GenIV-VI style.
#Valid Options are 1, 2, and 3.
#Only working option is 1, until openPoke actually gets closer to being finished.
print ("This option (copy paste from code) Sets whether the battle mode is |openPoke| style, |classic| GenI style, or |Modern| GenIV-VI style.")
#loads the AI client on startup, otherwise ignored.
def poke_battle_gen_dex():
#Sets what generation to use for the battle roster.
print ("This option will eventually allow you to choose from which roster you battle from. Examples are 1 for Kanto, 4 for Sinnoh, 6 for Modern Johto aka Gen4 Johto, 11 for Gen3 National Dex, and so on. ")
def PokemonSelect():
PokemonP1 = raw_input(" Select your starter pokemon. \n Alpha pokemon are Charmander and Bulbasaur. \n\n\n ")
    if PokemonP1 in {"Bulbasaur", "bulbasaur", "b", "B"}:
print ""
print ("You have selected Bulbasaur as your pokemon for this battle.")
print ""
time.sleep(3)
cls()
#execfile ("BattleSystem_Backend.py")
if PokemonP1 in {"Charmander", "charmander", "C", "c"}:
print ""
print ("You have selected Charmander as your pokemon for this battle.")
print ""
time.sleep(3)
cls()
#execfile ("BattleSystem_Backend.py")
print "Loading Battle System"
time.sleep(0.3)
print "."
time.sleep(0.3)
print ".."
time.sleep(0.3)
print "..."
time.sleep(0.3)
print "...."
time.sleep(0.3)
print "....."
time.sleep(0.7)
battleSubsystems()
def PokemonSelect2(n):
    name1, name2 = "null", "null"
    if n in {"Bulbasaur", "bulbasaur", "b", "B"}:
        name1 = "Bulbasaur"
        name2 = "Charmander"
    elif n in {"Charmander", "charmander", "C", "c"}:
        name1 = "Charmander"
        name2 = "Bulbasaur"
    else:
        print "Error assigning pokemon name."
    return name1, name2
def battleSubsystems():
print ""
print " Backend Math Subsystems loaded and functioning."
print " Beginning Trainer Battle Simulation!"
print ""
def demoOver():
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
print ""
time.sleep (0.4)
    cls()
| gpl-2.0 | -1,430,950,072,649,434,400 | 26.803279 | 209 | 0.535876 | false |
eharney/cinder | cinder/volume/drivers/hitachi/hbsd_horcm.py | 1 | 58062 | # Copyright (C) 2014, 2015, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import re
import shlex
import threading
import time
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
GETSTORAGEARRAY_ONCE = 100
MAX_SNAPSHOT_COUNT = 1021
SNAP_LAST_PATH_SSB = '0xB958,0x020A'
HOST_IO_SSB = '0xB958,0x0233'
INVALID_LUN_SSB = '0x2E20,0x0000'
INTERCEPT_LDEV_SSB = '0x2E22,0x0001'
HOSTGROUP_INSTALLED = '0xB956,0x3173'
RESOURCE_LOCKED = 'SSB=0x2E11,0x2205'
LDEV_STATUS_WAITTIME = 120
LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME
LUN_DELETE_INTERVAL = 3
EXEC_MAX_WAITTIME = 30
EXEC_RETRY_INTERVAL = 5
HORCM_WAITTIME = 1
PAIR_TYPE = ('HORC', 'MRCF', 'QS')
PERMITTED_TYPE = ('CVS', 'HDP', 'HDT')
RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_'
HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_'
RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_'
STATUS_TABLE = {
'SMPL': basic_lib.SMPL,
'COPY': basic_lib.COPY,
'RCPY': basic_lib.COPY,
'PAIR': basic_lib.PAIR,
'PFUL': basic_lib.PAIR,
'PSUS': basic_lib.PSUS,
'PFUS': basic_lib.PSUS,
'SSUS': basic_lib.PSUS,
'PSUE': basic_lib.PSUE,
}
NOT_SET = '-'
HORCM_RUNNING = 1
COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d'
SNAP_NAME = basic_lib.NAME_PREFIX + 'snap'
LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d'
MAX_MUNS = 3
EX_ENAUTH = 202
EX_ENOOBJ = 205
EX_CMDRJE = 221
EX_CMDIOE = 237
EX_INVCMD = 240
EX_INVMOD = 241
EX_ENODEV = 246
EX_ENOENT = 247
EX_OPTINV = 248
EX_ATTDBG = 250
EX_ATTHOR = 251
EX_COMERR = 255
NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT)
COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV)
HORCM_ERROR = (EX_ATTDBG, EX_ATTHOR, EX_COMERR)
MAX_HOSTGROUPS = 254
MAX_HLUN = 2047
DEFAULT_PORT_BASE = 31000
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('hitachi_horcm_numbers',
default='200,201',
help='Instance numbers for HORCM'),
cfg.StrOpt('hitachi_horcm_user',
help='Username of storage system for HORCM'),
cfg.StrOpt('hitachi_horcm_password',
help='Password of storage system for HORCM',
secret=True),
cfg.BoolOpt('hitachi_horcm_add_conf',
default=True,
help='Add to HORCM configuration'),
cfg.IntOpt('hitachi_horcm_resource_lock_timeout',
default=600,
help='Timeout until a resource lock is released, in seconds. '
'The value must be between 0 and 7200.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
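# Illustrative cinder.conf snippet for these options (section name and values
# are hypothetical, for orientation only):
#
#   [hitachi_backend]
#   hitachi_horcm_numbers = 200,201
#   hitachi_horcm_user = openstack
#   hitachi_horcm_password = secret
#   hitachi_horcm_add_conf = True
#   hitachi_horcm_resource_lock_timeout = 600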
def horcm_synchronized(function):
@functools.wraps(function)
def wrapper(*args, **kargs):
if len(args) == 1:
inst = args[0].conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = args[0].raidcom_lock
else:
inst = args[1]
raidcom_obj_lock = args[0].raidcom_pair_lock
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with raidcom_obj_lock, lock:
return function(*args, **kargs)
return wrapper
def storage_synchronized(function):
@functools.wraps(function)
def wrapper(*args, **kargs):
serial = args[0].conf.hitachi_serial_number
resource_lock = args[0].resource_lock
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
lock = basic_lib.get_process_lock(resource_lock_file)
with resource_lock, lock:
return function(*args, **kargs)
return wrapper
class HBSDHORCM(basic_lib.HBSDBasicLib):
def __init__(self, conf):
super(HBSDHORCM, self).__init__(conf=conf)
self.copy_groups = [None] * MAX_MUNS
self.raidcom_lock = threading.Lock()
self.raidcom_pair_lock = threading.Lock()
self.horcmgr_lock = threading.Lock()
self.horcmgr_flock = None
self.resource_lock = threading.Lock()
def check_param(self):
numbers = self.conf.hitachi_horcm_numbers.split(',')
if len(numbers) != 2:
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
for i in numbers:
if not i.isdigit():
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
self.conf.hitachi_horcm_numbers = [int(num) for num in numbers]
inst = self.conf.hitachi_horcm_numbers[0]
pair_inst = self.conf.hitachi_horcm_numbers[1]
if inst == pair_inst:
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
for param in ('hitachi_horcm_user', 'hitachi_horcm_password'):
if not getattr(self.conf, param):
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id:
msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
raise exception.HBSDError(message=msg)
resource_lock_timeout = self.conf.hitachi_horcm_resource_lock_timeout
if not ((resource_lock_timeout >= 0) and
(resource_lock_timeout <= 7200)):
msg = basic_lib.output_err(
601, param='hitachi_horcm_resource_lock_timeout')
raise exception.HBSDError(message=msg)
for opt in volume_opts:
getattr(self.conf, opt.name)
def set_copy_groups(self, host_ip):
serial = self.conf.hitachi_serial_number
inst = self.conf.hitachi_horcm_numbers[1]
for mun in range(MAX_MUNS):
copy_group = COPY_GROUP % (host_ip, serial, inst, mun)
self.copy_groups[mun] = copy_group
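    # For example, with host_ip "127.0.0.1", serial "812345" and pair
    # instance 201 (0xC9), and assuming basic_lib.NAME_PREFIX is "HBSD-",
    # mun 0 would yield a copy group name like "HBSD-127.0.0.18123450C90".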
def set_pair_flock(self):
inst = self.conf.hitachi_horcm_numbers[1]
name = '%s%d' % (HORCMGR_LOCK_FILE, inst)
self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock)
return self.horcmgr_flock
def check_horcm(self, inst):
args = 'HORCMINST=%d horcmgr -check' % inst
ret, _stdout, _stderr = self.exec_command('env', args=args,
printflag=False)
return ret
def shutdown_horcm(self, inst):
ret, stdout, stderr = self.exec_command(
'horcmshutdown.sh', args=six.text_type(inst), printflag=False)
return ret
def start_horcm(self, inst):
return self.exec_command('horcmstart.sh', args=six.text_type(inst),
printflag=False)
def _wait_for_horcm_shutdown(self, inst):
if self.check_horcm(inst) != HORCM_RUNNING:
raise loopingcall.LoopingCallDone()
if self.shutdown_horcm(inst):
LOG.error("Failed to shutdown horcm.")
raise loopingcall.LoopingCallDone()
@horcm_synchronized
def restart_horcm(self, inst=None):
if inst is None:
inst = self.conf.hitachi_horcm_numbers[0]
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_horcm_shutdown, inst)
loop.start(interval=HORCM_WAITTIME).wait()
ret, stdout, stderr = self.start_horcm(inst)
if ret:
msg = basic_lib.output_err(
600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def restart_pair_horcm(self):
inst = self.conf.hitachi_horcm_numbers[1]
self.restart_horcm(inst=inst)
def setup_horcmgr(self, host_ip):
pair_inst = self.conf.hitachi_horcm_numbers[1]
self.set_copy_groups(host_ip)
if self.conf.hitachi_horcm_add_conf:
self.create_horcmconf()
self.create_horcmconf(inst=pair_inst)
self.restart_horcm()
with self.horcmgr_flock:
self.restart_pair_horcm()
ret, stdout, stderr = self.comm_login()
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def _wait_for_exec_horcm(self, cmd, args, printflag, start):
if cmd == 'raidcom':
serial = self.conf.hitachi_serial_number
inst = self.conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = self.raidcom_lock
args = '%s -s %s -I%d' % (args, serial, inst)
else:
inst = self.conf.hitachi_horcm_numbers[1]
raidcom_obj_lock = self.raidcom_pair_lock
args = '%s -ISI%d' % (args, inst)
user = self.conf.hitachi_horcm_user
passwd = self.conf.hitachi_horcm_password
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with raidcom_obj_lock, lock:
ret, stdout, stderr = self.exec_command(cmd, args=args,
printflag=printflag)
# The resource group may be locked by other software.
# Therefore, wait until the lock is released.
if (RESOURCE_LOCKED in stderr and
(time.time() - start <
self.conf.hitachi_horcm_resource_lock_timeout)):
return
if not ret or ret <= 127:
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if time.time() - start >= EXEC_MAX_WAITTIME:
LOG.error("horcm command timeout.")
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if (ret == EX_ENAUTH and
not re.search("-login %s %s" % (user, passwd), args)):
_ret, _stdout, _stderr = self.comm_login()
if _ret:
LOG.error("Failed to authenticate user.")
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
elif ret in HORCM_ERROR:
_ret = 0
with raidcom_obj_lock, lock:
if self.check_horcm(inst) != HORCM_RUNNING:
_ret, _stdout, _stderr = self.start_horcm(inst)
if _ret and _ret != HORCM_RUNNING:
LOG.error("Failed to start horcm.")
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
elif ret not in COMMAND_IO_TO_RAID:
LOG.error("Unexpected error occurs in horcm.")
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
def exec_raidcom(self, cmd, args, printflag=True):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_exec_horcm, cmd, args, printflag, time.time())
return loop.start(interval=EXEC_RETRY_INTERVAL).wait()
def comm_login(self):
rmi_user = self.conf.hitachi_horcm_user
rmi_pass = self.conf.hitachi_horcm_password
args = '-login %s %s' % (rmi_user, rmi_pass)
return self.exec_raidcom('raidcom', args, printflag=False)
def comm_reset_status(self):
self.exec_raidcom('raidcom', 'reset command_status')
def comm_get_status(self):
return self.exec_raidcom('raidcom', 'get command_status')
def get_command_error(self, stdout):
lines = stdout.splitlines()
line = shlex.split(lines[1])
return int(line[3])
def comm_get_ldev(self, ldev):
opt = 'get ldev -ldev_id %s' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def add_used_hlun(self, port, gid, used_list):
opt = 'get lun -port %s-%d' % (port, gid)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
lun = int(shlex.split(line)[3])
if lun not in used_list:
used_list.append(lun)
def get_unused_ldev(self, ldev_range):
start = ldev_range[0]
end = ldev_range[1]
while start < end:
if end - start + 1 > GETSTORAGEARRAY_ONCE:
cnt = GETSTORAGEARRAY_ONCE
else:
cnt = end - start + 1
opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
ldev_num = None
for line in lines:
if re.match("LDEV :", line):
ldev_num = int(shlex.split(line)[2])
continue
if re.match("VOL_TYPE : NOT DEFINED", line):
return ldev_num
start += GETSTORAGEARRAY_ONCE
else:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(message=msg)
def get_hgname_gid(self, port, host_grp_name):
opt = 'get host_grp -port %s -key host_grp' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[2] == host_grp_name:
return int(line[1])
return None
def get_unused_gid(self, range, port):
_min = range[0]
_max = range[1]
opt = 'get host_grp -port %s -key host_grp' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
free_gid = None
for line in lines[_min + 1:]:
line = shlex.split(line)
if int(line[1]) > _max:
break
if line[2] == '-':
free_gid = int(line[1])
break
if free_gid is None:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return free_gid
def comm_set_target_wwns(self, target_ports):
opt = 'get port'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
target_wwns = {}
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
port = line[0][:5]
if target_ports and port not in target_ports:
continue
target_wwns[port] = line[10]
LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
opt = 'get host_grp -port %s' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
found_wwns = 0
for line in lines[1:]:
line = shlex.split(line)
if not re.match(basic_lib.NAME_PREFIX, line[2]):
continue
gid = line[1]
opt = 'get hba_wwn -port %s-%s' % (port, gid)
ret, stdout, stderr = self.exec_raidcom(
'raidcom', opt, printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
hba_info = shlex.split(line)
if hba_info[3] in wwns:
hostgroups.append({'port': six.text_type(port),
'gid': int(hba_info[1]),
'initiator_wwn': hba_info[3],
'detected': is_detected})
found_wwns += 1
if len(wwns) == found_wwns:
break
if len(wwns) == found_wwns:
break
def comm_chk_login_wwn(self, wwns, port):
opt = 'get port -port %s' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
login_info = shlex.split(line)
if login_info[1] in wwns:
return True
else:
return False
def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
security_ports = []
hostgroups = []
opt = 'get port'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
port = line[0][:5]
if target_ports and port not in target_ports:
continue
security = True if line[7] == 'Y' else False
is_detected = None
if login:
is_detected = self.comm_chk_login_wwn(wwns, port)
if security:
self.comm_get_hbawwn(hostgroups, wwns, port, is_detected)
security_ports.append(port)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
def _get_lun(self, port, gid, ldev):
lun = None
opt = 'get lun -port %s-%d' % (port, gid)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[5] == six.text_type(ldev):
lun = int(line[3])
break
return lun
def _wait_for_delete_lun(self, hostgroup, ldev, start):
opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'],
hostgroup['gid'], ldev)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if not ret:
raise loopingcall.LoopingCallDone()
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
not self.comm_get_snapshot(ldev) or
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr))
if time.time() - start >= LUN_DELETE_WAITTIME:
msg = basic_lib.output_err(
637, method='_wait_for_delete_lun',
timeout=LUN_DELETE_WAITTIME)
raise exception.HBSDError(message=msg)
else:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_lun_core(self, hostgroup, ldev):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_delete_lun, hostgroup, ldev, time.time())
loop.start(interval=LUN_DELETE_INTERVAL).wait()
def comm_delete_lun(self, hostgroups, ldev):
deleted_hostgroups = []
no_ldev_cnt = 0
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
is_deleted = False
for deleted in deleted_hostgroups:
if port == deleted['port'] and gid == deleted['gid']:
is_deleted = True
if is_deleted:
continue
try:
self.comm_delete_lun_core(hostgroup, ldev)
except exception.HBSDCmdError as ex:
no_ldev_cnt += 1
if ex.ret == EX_ENOOBJ:
if no_ldev_cnt != len(hostgroups):
continue
raise exception.HBSDNotFound
else:
raise
deleted_hostgroups.append({'port': port, 'gid': gid})
def _check_ldev_status(self, ldev, status):
opt = ('get ldev -ldev_id %s -check_status %s -time %s' %
(ldev, status, LDEV_STATUS_WAITTIME))
ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt)
return ret
    # Don't remove the storage_synchronized decorator.
    # It is needed to prevent comm_add_ldev() and comm_delete_ldev() from
    # being executed concurrently.
@storage_synchronized
def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
emulation = 'OPEN-V'
if is_vvol:
opt = ('add ldev -pool snap -ldev_id %d '
'-capacity %dG -emulation %s'
% (ldev, capacity, emulation))
else:
opt = ('add ldev -pool %d -ldev_id %d '
'-capacity %dG -emulation %s'
% (pool_id, ldev, capacity, emulation))
self.comm_reset_status()
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
if self._check_ldev_status(ldev, "NML"):
msg = basic_lib.output_err(653, ldev=ldev)
raise exception.HBSDError(message=msg)
def comm_add_hostgrp(self, port, gid, host_grp_name):
opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid,
host_grp_name)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_del_hostgrp(self, port, gid, host_grp_name):
opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_hbawwn(self, port, gid, wwn):
opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
@storage_synchronized
def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False):
tmp_hostgroups = hostgroups[:]
is_ok = False
used_list = []
lun = None
old_lun = None
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
self.add_used_hlun(port, gid, used_list)
lun = self._get_lun(port, gid, ldev)
            # 'lun' or 'old_lun' may legitimately be 0, so the explicit
            # 'is not None' checks below cannot be replaced by truthiness.
if lun is not None:
if old_lun is not None and old_lun != lun:
msg = basic_lib.output_err(648, resource='LUN (HLUN)')
raise exception.HBSDError(message=msg)
is_ok = True
hostgroup['lun'] = lun
tmp_hostgroups.remove(hostgroup)
old_lun = lun
if is_once:
                # 'lun' may legitimately be 0, so the explicit 'is not None'
                # check cannot be replaced by a truthiness test.
if lun is not None:
return
elif len(used_list) < MAX_HLUN + 1:
break
else:
tmp_hostgroups.remove(hostgroup)
if tmp_hostgroups:
used_list = []
if not used_list:
lun = 0
elif lun is None:
for i in range(MAX_HLUN + 1):
if i not in used_list:
lun = i
break
else:
raise exception.HBSDNotFound
opt = None
ret = 0
stdout = None
stderr = None
invalid_hgs_str = None
for hostgroup in tmp_hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
if not hostgroup['detected']:
if invalid_hgs_str:
invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
port, gid)
else:
invalid_hgs_str = '%s:%d' % (port, gid)
continue
opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % (
port, gid, ldev, lun)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if not ret:
is_ok = True
hostgroup['lun'] = lun
if is_once:
break
else:
LOG.warning(basic_lib.set_msg(
314, ldev=ldev, lun=lun, port=port, id=gid))
if not is_ok:
if stderr:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
else:
msg = basic_lib.output_err(659, gid=invalid_hgs_str)
raise exception.HBSDError(message=msg)
    # Don't remove the storage_synchronized decorator.
    # It is needed to prevent comm_add_ldev() and comm_delete_ldev() from
    # being executed concurrently.
@storage_synchronized
def comm_delete_ldev(self, ldev, is_vvol):
ret = -1
stdout = ""
stderr = ""
self.comm_reset_status()
opt = 'delete ldev -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % INVALID_LUN_SSB, stderr):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
ret, stdout, stderr = self.comm_get_status()
if ret or self.get_command_error(stdout):
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_extend_ldev(self, ldev, old_size, new_size):
extend_size = new_size - old_size
opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_dp_pool(self, pool_id):
opt = 'get dp_pool'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
if int(shlex.split(line)[0]) == pool_id:
free_gb = int(shlex.split(line)[3]) / 1024
total_gb = int(shlex.split(line)[4]) / 1024
return total_gb, free_gb
msg = basic_lib.output_err(640, pool_id=pool_id)
raise exception.HBSDError(message=msg)
def comm_modify_ldev(self, ldev):
args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr))
def is_detected(self, port, wwn):
return self.comm_chk_login_wwn([wwn], port)
def discard_zero_page(self, ldev):
try:
self.comm_modify_ldev(ldev)
except Exception as ex:
LOG.warning('Failed to discard zero page: %s', ex)
def comm_add_snapshot(self, pvol, svol):
pool = self.conf.hitachi_thin_pool_id
copy_size = self.conf.hitachi_copy_speed
args = ('add snapshot -ldev_id %d %d -pool %d '
'-snapshot_name %s -copy_size %d'
% (pvol, svol, pool, SNAP_NAME, copy_size))
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_snapshot(self, ldev):
args = 'delete snapshot -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_modify_snapshot(self, ldev, op):
args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op))
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def _wait_for_snap_status(self, pvol, svol, status, timeout, start):
if (self.get_snap_pvol_status(pvol, svol) in status and
self.get_snap_svol_status(svol) in status):
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
                637, method='_wait_for_snap_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def wait_snap(self, pvol, svol, status, timeout, interval):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_snap_status, pvol,
svol, status, timeout, time.time())
loop.start(interval=interval).wait()
def comm_get_snapshot(self, ldev):
args = 'get snapshot -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def check_snap_count(self, ldev):
stdout = self.comm_get_snapshot(ldev)
if not stdout:
return
lines = stdout.splitlines()
if len(lines) >= MAX_SNAPSHOT_COUNT + 1:
msg = basic_lib.output_err(
615, copy_method=basic_lib.THIN, pvol=ldev)
raise exception.HBSDBusy(message=msg)
def get_snap_pvol_status(self, pvol, svol):
stdout = self.comm_get_snapshot(pvol)
if not stdout:
return basic_lib.SMPL
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if int(line[6]) == svol:
return STATUS_TABLE[line[2]]
else:
return basic_lib.SMPL
def get_snap_svol_status(self, ldev):
stdout = self.comm_get_snapshot(ldev)
if not stdout:
return basic_lib.SMPL
lines = stdout.splitlines()
line = shlex.split(lines[1])
return STATUS_TABLE[line[2]]
@horcm_synchronized
def create_horcmconf(self, inst=None):
if inst is None:
inst = self.conf.hitachi_horcm_numbers[0]
serial = self.conf.hitachi_serial_number
filename = '/etc/horcm%d.conf' % inst
port = DEFAULT_PORT_BASE + inst
found = False
if not os.path.exists(filename):
file_str = """
HORCM_MON
#ip_address service poll(10ms) timeout(10ms)
127.0.0.1 %16d 6000 3000
HORCM_CMD
""" % port
else:
file_str = utils.read_file_as_root(filename)
lines = file_str.splitlines()
for line in lines:
if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line):
found = True
break
if not found:
insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial
file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)',
r'\1\n%s\n' % insert_str, file_str)
try:
utils.execute('tee', filename, process_input=file_str,
run_as_root=True)
except putils.ProcessExecutionError as ex:
msg = basic_lib.output_err(
632, file=filename, ret=ex.exit_code, err=ex.stderr)
raise exception.HBSDError(message=msg)
def comm_get_copy_grp(self):
ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp',
printflag=False)
if ret:
opt = 'raidcom get copy_grp'
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun):
args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d'
% (copy_group, pvol_group, svol_group, mun))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_copy_grp(self, copy_group):
args = 'delete copy_grp -copy_grp_name %s' % copy_group
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_device_grp(self, group_name):
args = 'get device_grp -device_grp_name %s' % group_name
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def comm_add_device_grp(self, group_name, ldev_name, ldev):
args = ('add device_grp -device_grp_name %s %s -ldev_id %d'
% (group_name, ldev_name, ldev))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_device_grp(self, group_name, ldev):
args = ('delete device_grp -device_grp_name %s -ldev_id %d'
% (group_name, ldev))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_paircreate(self, copy_group, ldev_name):
args = ('-g %s -d %s -split -fq quick -c %d -vl'
% (copy_group, ldev_name, self.conf.hitachi_copy_speed))
ret, stdout, stderr = self.exec_raidcom('paircreate', args)
if ret:
opt = 'paircreate %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_pairsplit(self, copy_group, ldev_name):
args = '-g %s -d %s -S' % (copy_group, ldev_name)
ret, stdout, stderr = self.exec_raidcom('pairsplit', args)
if ret:
opt = 'pairsplit %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_pairevtwait(self, copy_group, ldev_name, check_svol):
if not check_svol:
option = '-nowait'
else:
option = '-nowaits'
args = '-g %s -d %s %s' % (copy_group, ldev_name, option)
ret, stdout, stderr = self.exec_raidcom('pairevtwait', args,
printflag=False)
if ret > 127:
opt = 'pairevtwait %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret
def comm_pairdisplay(self, copy_group, ldev_name=None):
if not ldev_name:
args = '-g %s -CLI' % copy_group
else:
args = '-g %s -d %s -CLI' % (copy_group, ldev_name)
ret, stdout, stderr = self.exec_raidcom('pairdisplay', args,
printflag=False)
if ret and ret not in NO_SUCH_DEVICE:
opt = 'pairdisplay %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret, stdout, stderr
def check_copy_grp(self, copy_group):
stdout = self.comm_get_copy_grp()
lines = stdout.splitlines()
count = 0
for line in lines[1:]:
line = shlex.split(line)
if line[0] == copy_group:
count += 1
if count == 2:
break
return count
def check_device_grp(self, group_name, ldev, ldev_name=None):
stdout = self.comm_get_device_grp(group_name)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if int(line[2]) == ldev:
if not ldev_name:
return True
else:
return line[1] == ldev_name
else:
return False
def is_smpl(self, copy_group, ldev_name):
ret, stdout, stderr = self.comm_pairdisplay(copy_group,
ldev_name=ldev_name)
if not stdout:
return True
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[9] in [NOT_SET, 'SMPL']:
return True
else:
return False
def get_copy_groups(self):
copy_groups = []
stdout = self.comm_get_copy_grp()
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[0] in self.copy_groups and line[0] not in copy_groups:
copy_groups.append(line[0])
return copy_groups
def get_matched_copy_group(self, pvol, svol, ldev_name):
for copy_group in self.get_copy_groups():
pvol_group = '%sP' % copy_group
if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
return copy_group
else:
return None
def get_paired_info(self, ldev, only_flag=False):
paired_info = {'pvol': None, 'svol': []}
pvol = None
is_svol = False
stdout = self.comm_get_snapshot(ldev)
if stdout:
lines = stdout.splitlines()
line = shlex.split(lines[1])
status = STATUS_TABLE.get(line[2], basic_lib.UNKN)
if line[1] == 'P-VOL':
pvol = ldev
svol = int(line[6])
else:
is_svol = True
pvol = int(line[6])
svol = ldev
if status == basic_lib.PSUS:
status = self.get_snap_pvol_status(pvol, svol)
svol_info = {'lun': svol, 'status': status, 'is_vvol': True}
paired_info['svol'].append(svol_info)
paired_info['pvol'] = pvol
if only_flag or is_svol:
return paired_info
for copy_group in self.get_copy_groups():
ldev_name = None
pvol_status = basic_lib.UNKN
svol_status = basic_lib.UNKN
ret, stdout, stderr = self.comm_pairdisplay(copy_group)
if not stdout:
continue
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[9] not in ['P-VOL', 'S-VOL']:
continue
ldev0 = int(line[8])
ldev1 = int(line[12])
if ldev not in [ldev0, ldev1]:
continue
ldev_name = line[1]
if line[9] == 'P-VOL':
pvol = ldev0
svol = ldev1
pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
else:
svol = ldev0
pvol = ldev1
svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
if svol == ldev:
is_svol = True
if not ldev_name:
continue
pvol_group = '%sP' % copy_group
pvol_ok = self.check_device_grp(pvol_group, pvol,
ldev_name=ldev_name)
svol_group = '%sS' % copy_group
svol_ok = self.check_device_grp(svol_group, svol,
ldev_name=ldev_name)
if pvol_ok and svol_ok:
if pvol_status == basic_lib.PSUS:
status = svol_status
else:
status = pvol_status
svol_info = {'lun': svol, 'status': status, 'is_vvol': False}
paired_info['svol'].append(svol_info)
if is_svol:
break
        # 'pvol' may legitimately be 0, so the explicit 'is not None' check
        # cannot be replaced by a truthiness test.
if pvol is not None and paired_info['pvol'] is None:
paired_info['pvol'] = pvol
return paired_info
def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun):
pvol_group = '%sP' % copy_group
svol_group = '%sS' % copy_group
self.comm_add_device_grp(pvol_group, ldev_name, pvol)
self.comm_add_device_grp(svol_group, ldev_name, svol)
nr_copy_groups = self.check_copy_grp(copy_group)
if nr_copy_groups == 1:
self.comm_delete_copy_grp(copy_group)
if nr_copy_groups != 2:
self.comm_add_copy_grp(copy_group, pvol_group, svol_group, mun)
def delete_pair_config(self, pvol, svol, copy_group, ldev_name):
pvol_group = '%sP' % copy_group
svol_group = '%sS' % copy_group
if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
self.comm_delete_device_grp(pvol_group, pvol)
if self.check_device_grp(svol_group, svol, ldev_name=ldev_name):
self.comm_delete_device_grp(svol_group, svol)
def _wait_for_pair_status(self, copy_group, ldev_name,
status, timeout, check_svol, start):
if self.comm_pairevtwait(copy_group, ldev_name,
check_svol) in status:
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
                637, method='_wait_for_pair_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def wait_pair(self, copy_group, ldev_name, status, timeout,
interval, check_svol=False):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_pair_status, copy_group, ldev_name,
status, timeout, check_svol, time.time())
loop.start(interval=interval).wait()
def comm_create_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
interval = self.conf.hitachi_copy_check_interval
if not is_vvol:
restart = False
create = False
ldev_name = LDEV_NAME % (pvol, svol)
mun = 0
for mun in range(MAX_MUNS):
copy_group = self.copy_groups[mun]
pvol_group = '%sP' % copy_group
if not self.check_device_grp(pvol_group, pvol):
break
else:
msg = basic_lib.output_err(
615, copy_method=basic_lib.FULL, pvol=pvol)
raise exception.HBSDBusy(message=msg)
try:
self.add_pair_config(pvol, svol, copy_group, ldev_name, mun)
self.restart_pair_horcm()
restart = True
self.comm_paircreate(copy_group, ldev_name)
create = True
self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS],
timeout, interval)
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS, basic_lib.COPY],
timeout, interval, check_svol=True)
except Exception:
with excutils.save_and_reraise_exception():
if create:
try:
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS], timeout,
interval)
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS], timeout,
interval, check_svol=True)
except Exception as ex:
LOG.warning('Failed to create pair: %s', ex)
try:
self.comm_pairsplit(copy_group, ldev_name)
self.wait_pair(
copy_group, ldev_name,
[basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning('Failed to create pair: %s', ex)
if self.is_smpl(copy_group, ldev_name):
try:
self.delete_pair_config(pvol, svol, copy_group,
ldev_name)
except Exception as ex:
LOG.warning('Failed to create pair: %s', ex)
if restart:
try:
self.restart_pair_horcm()
except Exception as ex:
LOG.warning('Failed to restart horcm: %s', ex)
else:
self.check_snap_count(pvol)
self.comm_add_snapshot(pvol, svol)
try:
self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval)
self.comm_modify_snapshot(svol, 'create')
self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.comm_delete_snapshot(svol)
self.wait_snap(
pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning('Failed to create pair: %s', ex)
def delete_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
interval = self.conf.hitachi_async_copy_check_interval
if not is_vvol:
ldev_name = LDEV_NAME % (pvol, svol)
copy_group = self.get_matched_copy_group(pvol, svol, ldev_name)
if not copy_group:
return
try:
self.comm_pairsplit(copy_group, ldev_name)
self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL],
timeout, interval)
finally:
if self.is_smpl(copy_group, ldev_name):
self.delete_pair_config(pvol, svol, copy_group, ldev_name)
else:
self.comm_delete_snapshot(svol)
self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval)
def comm_raidqry(self):
ret, stdout, stderr = self.exec_command('raidqry', '-h')
if ret:
opt = 'raidqry -h'
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def get_comm_version(self):
stdout = self.comm_raidqry()
lines = stdout.splitlines()
return shlex.split(lines[1])[1]
def output_param_to_log(self, conf):
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
LOG.info('\t%(name)-35s : %(value)s',
{'name': opt.name, 'value': value})
def create_lock_file(self):
inst = self.conf.hitachi_horcm_numbers[0]
pair_inst = self.conf.hitachi_horcm_numbers[1]
serial = self.conf.hitachi_serial_number
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst)
horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst)
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
basic_lib.create_empty_file(raidcom_lock_file)
basic_lib.create_empty_file(raidcom_pair_lock_file)
basic_lib.create_empty_file(horcmgr_lock_file)
basic_lib.create_empty_file(resource_lock_file)
def connect_storage(self):
properties = utils.brick_get_connector_properties()
self.setup_horcmgr(properties['ip'])
def get_max_hostgroups(self):
"""return the maximum value of hostgroup id."""
return MAX_HOSTGROUPS
def get_hostgroup_luns(self, port, gid):
list = []
self.add_used_hlun(port, gid, list)
return list
def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
param = 'serial_number'
if param not in existing_ref:
msg = basic_lib.output_err(700, param=param)
raise exception.HBSDError(data=msg)
storage = existing_ref.get(param)
if storage != self.conf.hitachi_serial_number:
msg = basic_lib.output_err(648, resource=param)
raise exception.HBSDError(data=msg)
stdout = self.comm_get_ldev(ldev)
if not stdout:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(data=msg)
sts_line = vol_type = ""
vol_attrs = []
size = num_port = 1
lines = stdout.splitlines()
for line in lines:
if line.startswith("STS :"):
sts_line = line
elif line.startswith("VOL_TYPE :"):
vol_type = shlex.split(line)[2]
elif line.startswith("VOL_ATTR :"):
vol_attrs = shlex.split(line)[2:]
elif line.startswith("VOL_Capacity(BLK) :"):
size = int(shlex.split(line)[2])
elif line.startswith("NUM_PORT :"):
num_port = int(shlex.split(line)[2])
if 'NML' not in sts_line:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(data=msg)
if 'OPEN-V' not in vol_type:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
if 'HDP' not in vol_attrs:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
for vol_attr in vol_attrs:
if vol_attr == ':':
continue
if vol_attr in PAIR_TYPE:
msg = basic_lib.output_err(705, ldev=ldev)
raise exception.HBSDError(data=msg)
if vol_attr not in PERMITTED_TYPE:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
# Hitachi storage calculates volume sizes in a block unit, 512 bytes.
# So, units.Gi is divided by 512.
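        # For example, a 1 GB LDEV reports VOL_Capacity(BLK) : 2097152;
        # units.Gi / 512 == 2097152, so the modulo check passes and the
        # method returns 2097152 / 2097152 = 1.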
if size % (units.Gi / 512):
msg = basic_lib.output_err(703, ldev=ldev)
raise exception.HBSDError(data=msg)
if num_port:
msg = basic_lib.output_err(704, ldev=ldev)
raise exception.HBSDError(data=msg)
return size / (units.Gi / 512)
| apache-2.0 | 464,826,580,063,807,040 | 37.656458 | 79 | 0.533378 | false |
nschloe/quadpy | src/quadpy/c1/_gauss_patterson.py | 1 | 32328 | import numpy as np
from ..helpers import article
from ._gauss_legendre import gauss_legendre
from ._helpers import C1Scheme
source = article(
authors=["T.N.L. Patterson"],
title="The optimum addition of points to quadrature formulae",
journal="Math. Comp.",
volume="22",
year="1968",
pages="847-856",
url="https://doi.org/10.1090/S0025-5718-68-99866-9",
)
def gauss_patterson(index):
# Gauss-Patterson quadrature.
# <https://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_patterson/quadrature_rules_patterson.html>
degree = 3 * 2 ** index - 1 if index > 0 else 1
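    # e.g. index 0 -> degree 1, index 1 -> degree 5, index 2 -> degree 11,
    # index 6 -> degree 191 (the 127-point rule referenced below).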
points = np.sort(_get_points(index))
# weights
if index < 6:
weights = _get_weights(points)
elif index == 6:
# _get_weights is flawed with round-off for index > 5. Use explicit values from
# <https://people.sc.fsu.edu/~jburkardt/datasets/quadrature_rules_patterson/gp_o127_w.txt>.
s = np.array(
[
5.053609520786252e-05,
1.807395644453884e-04,
3.777466463269846e-04,
6.326073193626335e-04,
9.383698485423815e-04,
1.289524082610417e-03,
1.681142865421470e-03,
2.108815245726633e-03,
2.568764943794020e-03,
3.057753410175531e-03,
3.572892783517299e-03,
4.111503978654693e-03,
4.671050372114322e-03,
5.249123454808859e-03,
5.843449875835640e-03,
6.451900050175737e-03,
7.072489995433555e-03,
7.703375233279742e-03,
8.342838753968157e-03,
8.989275784064136e-03,
9.641177729702537e-03,
1.029711695795636e-02,
1.095573338783790e-02,
1.161572331995513e-02,
1.227583056008277e-02,
1.293483966360737e-02,
1.359157100976555e-02,
1.424487737291678e-02,
1.489364166481518e-02,
1.553677555584398e-02,
1.617321872957772e-02,
1.680193857410386e-02,
1.742193015946417e-02,
1.803221639039129e-02,
1.863184825613879e-02,
1.921990512472777e-02,
1.979549504809750e-02,
2.035775505847216e-02,
2.090585144581202e-02,
2.143898001250387e-02,
2.195636630531782e-02,
2.245726582681610e-02,
2.294096422938775e-02,
2.340677749531401e-02,
2.385405210603854e-02,
2.428216520333660e-02,
2.469052474448768e-02,
2.507856965294977e-02,
2.544576996546477e-02,
2.579162697602423e-02,
2.611567337670610e-02,
2.641747339505826e-02,
2.669662292745036e-02,
2.695274966763303e-02,
2.718551322962479e-02,
2.739460526398143e-02,
2.757974956648187e-02,
2.774070217827968e-02,
2.787725147661370e-02,
2.798921825523816e-02,
2.807645579381725e-02,
2.813884991562715e-02,
2.817631903301660e-02,
]
)
weights = np.concatenate([s, np.array([0.2818881418019236e-01]), s[::-1]])
elif index == 7:
s = np.array(
[
0.69379364324108267170e-05,
0.25157870384280661489e-04,
0.53275293669780613125e-04,
0.90372734658751149261e-04,
0.13575491094922871973e-03,
0.18887326450650491366e-03,
0.24921240048299729402e-03,
0.31630366082226447689e-03,
0.38974528447328229322e-03,
0.46918492424785040975e-03,
0.55429531493037471492e-03,
0.64476204130572477933e-03,
0.74028280424450333046e-03,
0.84057143271072246365e-03,
0.94536151685852538246e-03,
0.10544076228633167722e-02,
0.11674841174299594077e-02,
0.12843824718970101768e-02,
0.14049079956551446427e-02,
0.15288767050877655684e-02,
0.16561127281544526052e-02,
0.17864463917586498247e-02,
0.19197129710138724125e-02,
0.20557519893273465236e-02,
0.21944069253638388388e-02,
0.23355251860571608737e-02,
0.24789582266575679307e-02,
0.26245617274044295626e-02,
0.27721957645934509940e-02,
0.29217249379178197538e-02,
0.30730184347025783234e-02,
0.32259500250878684614e-02,
0.33803979910869203823e-02,
0.35362449977167777340e-02,
0.36933779170256508183e-02,
0.38516876166398709241e-02,
0.40110687240750233989e-02,
0.41714193769840788528e-02,
0.43326409680929828545e-02,
0.44946378920320678616e-02,
0.46573172997568547773e-02,
0.48205888648512683476e-02,
0.49843645647655386012e-02,
0.51485584789781777618e-02,
0.53130866051870565663e-02,
0.54778666939189508240e-02,
0.56428181013844441585e-02,
0.58078616599775673635e-02,
0.59729195655081658049e-02,
0.61379152800413850435e-02,
0.63027734490857587172e-02,
0.64674198318036867274e-02,
0.66317812429018878941e-02,
0.67957855048827733948e-02,
0.69593614093904229394e-02,
0.71224386864583871532e-02,
0.72849479805538070639e-02,
0.74468208324075910174e-02,
0.76079896657190565832e-02,
0.77683877779219912200e-02,
0.79279493342948491103e-02,
0.80866093647888599710e-02,
0.82443037630328680306e-02,
0.84009692870519326354e-02,
0.85565435613076896192e-02,
0.87109650797320868736e-02,
0.88641732094824942641e-02,
0.90161081951956431600e-02,
0.91667111635607884067e-02,
0.93159241280693950932e-02,
0.94636899938300652943e-02,
0.96099525623638830097e-02,
0.97546565363174114611e-02,
0.98977475240487497440e-02,
0.10039172044056840798e-01,
0.10178877529236079733e-01,
0.10316812330947621682e-01,
0.10452925722906011926e-01,
0.10587167904885197931e-01,
0.10719490006251933623e-01,
0.10849844089337314099e-01,
0.10978183152658912470e-01,
0.11104461134006926537e-01,
0.11228632913408049354e-01,
0.11350654315980596602e-01,
0.11470482114693874380e-01,
0.11588074033043952568e-01,
0.11703388747657003101e-01,
0.11816385890830235763e-01,
0.11927026053019270040e-01,
0.12035270785279562630e-01,
0.12141082601668299679e-01,
0.12244424981611985899e-01,
0.12345262372243838455e-01,
0.12443560190714035263e-01,
0.12539284826474884353e-01,
0.12632403643542078765e-01,
0.12722884982732382906e-01,
0.12810698163877361967e-01,
0.12895813488012114694e-01,
0.12978202239537399286e-01,
0.13057836688353048840e-01,
0.13134690091960152836e-01,
0.13208736697529129966e-01,
0.13279951743930530650e-01,
0.13348311463725179953e-01,
0.13413793085110098513e-01,
0.13476374833816515982e-01,
0.13536035934956213614e-01,
0.13592756614812395910e-01,
0.13646518102571291428e-01,
0.13697302631990716258e-01,
0.13745093443001896632e-01,
0.13789874783240936517e-01,
0.13831631909506428676e-01,
0.13870351089139840997e-01,
0.13906019601325461264e-01,
0.13938625738306850804e-01,
0.13968158806516938516e-01,
0.13994609127619079852e-01,
0.14017968039456608810e-01,
0.14038227896908623303e-01,
0.14055382072649964277e-01,
0.14069424957813575318e-01,
0.14080351962553661325e-01,
0.14088159516508301065e-01,
0.14092845069160408355e-01,
]
)
weights = np.concatenate([s, np.array([0.14094407090096179347e-01]), s[::-1]])
else:
assert index == 8
s = np.array(
[
0.945715933950007048827e-06,
0.345456507169149134898e-05,
0.736624069102321668857e-05,
0.125792781889592743525e-04,
0.190213681905875816679e-04,
0.266376412339000901358e-04,
0.353751372055189588628e-04,
0.451863674126296143105e-04,
0.560319507856164252140e-04,
0.678774554733972416227e-04,
0.806899228014035293851e-04,
0.944366322532705527066e-04,
0.109085545645741522051e-03,
0.124606200241498368482e-03,
0.140970302204104791413e-03,
0.158151830411132242924e-03,
0.176126765545083195474e-03,
0.194872642236641146532e-03,
0.214368090034216937149e-03,
0.234592462123925204879e-03,
0.255525589595236862014e-03,
0.277147657465187357459e-03,
0.299439176850911730874e-03,
0.322381020652862389664e-03,
0.345954492129903871350e-03,
0.370141402122251665232e-03,
0.394924138246873704434e-03,
0.420285716355361231823e-03,
0.446209810101403247488e-03,
0.472680758429262691232e-03,
0.499683553312800484519e-03,
0.527203811431658386125e-03,
0.555227733977307579715e-03,
0.583742058714979703847e-03,
0.612734008012225209294e-03,
0.642191235948505088403e-03,
0.672101776960108194646e-03,
0.702453997827572321358e-03,
0.733236554224767912055e-03,
0.764438352543882784191e-03,
0.796048517297550871506e-03,
0.828056364077226302608e-03,
0.860451377808527848128e-03,
0.893223195879324912340e-03,
0.926361595613111283368e-03,
0.959856485506936206261e-03,
0.993697899638760857945e-03,
0.102787599466367326179e-02,
0.106238104885340071375e-02,
0.109720346268191941940e-02,
0.113233376051597664917e-02,
0.116776259302858043685e-02,
0.120348074001265964881e-02,
0.123947911332878396534e-02,
0.127574875977346947345e-02,
0.131228086370221478128e-02,
0.134906674928353113127e-02,
0.138609788229672549700e-02,
0.142336587141720519900e-02,
0.146086246895890987689e-02,
0.149857957106456636214e-02,
0.153650921735128916170e-02,
0.157464359003212166189e-02,
0.161297501254393423070e-02,
0.165149594771914570655e-02,
0.169019899554346019117e-02,
0.172907689054461607168e-02,
0.176812249885838886701e-02,
0.180732881501808930079e-02,
0.184668895851282540913e-02,
0.188619617015808475394e-02,
0.192584380831993546204e-02,
0.196562534503150547732e-02,
0.200553436203751169944e-02,
0.204556454679958293446e-02,
0.208570968849203942640e-02,
0.212596367401472533045e-02,
0.216632048404649142727e-02,
0.220677418916003329194e-02,
0.224731894601603393082e-02,
0.228794899365195972378e-02,
0.232865864987842738864e-02,
0.236944230779380495146e-02,
0.241029443242563417382e-02,
0.245120955750556483923e-02,
0.249218228238276930060e-02,
0.253320726907925325750e-02,
0.257427923948908888092e-02,
0.261539297272236109225e-02,
0.265654330259352828314e-02,
0.269772511525294586667e-02,
0.273893334695947541201e-02,
0.278016298199139435045e-02,
0.282140905069222207923e-02,
0.286266662764757868253e-02,
0.290393082998878368175e-02,
0.294519681581857582284e-02,
0.298645978275408290247e-02,
0.302771496658198544480e-02,
0.306895764002069252174e-02,
0.311018311158427546158e-02,
0.315138672454287935858e-02,
0.319256385597434736790e-02,
0.323370991590184336368e-02,
0.327482034651233969564e-02,
0.331589062145094394706e-02,
0.335691624518616761342e-02,
0.339789275244138669739e-02,
0.343881570768790591876e-02,
0.347968070469521146972e-02,
0.352048336613417922682e-02,
0.356121934322919357659e-02,
0.360188431545532431869e-02,
0.364247399027690353194e-02,
0.368298410292403911967e-02,
0.372341041620379550870e-02,
0.376374872034296338241e-02,
0.380399483285952829161e-02,
0.384414459846013158917e-02,
0.388419388896099560998e-02,
0.392413860322995774660e-02,
0.396397466714742455513e-02,
0.400369803358421688562e-02,
0.404330468239442998549e-02,
0.408279062042157838350e-02,
0.412215188151643401528e-02,
0.416138452656509745764e-02,
0.420048464352596631772e-02,
0.423944834747438184434e-02,
0.427827178065384480959e-02,
0.431695111253279479928e-02,
0.435548253986604343679e-02,
0.439386228676004195260e-02,
0.443208660474124713206e-02,
0.447015177282692726900e-02,
0.450805409759782158001e-02,
0.454578991327213285488e-02,
0.458335558178039420335e-02,
0.462074749284080687482e-02,
0.465796206403469754658e-02,
0.469499574088179046532e-02,
0.473184499691503264714e-02,
0.476850633375474925263e-02,
0.480497628118194150483e-02,
0.484125139721057135214e-02,
0.487732826815870573054e-02,
0.491320350871841897367e-02,
0.494887376202437487201e-02,
0.498433569972103029914e-02,
0.501958602202842039909e-02,
0.505462145780650125058e-02,
0.508943876461803986674e-02,
0.512403472879005351831e-02,
0.515840616547381084096e-02,
0.519254991870341614863e-02,
0.522646286145300596306e-02,
0.526014189569259311205e-02,
0.529358395244259896547e-02,
0.532678599182711857974e-02,
0.535974500312596681161e-02,
0.539245800482555593606e-02,
0.542492204466865704951e-02,
0.545713419970309863995e-02,
0.548909157632945623482e-02,
0.552079131034778706457e-02,
0.555223056700346326850e-02,
0.558340654103215637610e-02,
0.561431645670402467678e-02,
0.564495756786715368885e-02,
0.567532715799029830087e-02,
0.570542254020497332312e-02,
0.573524105734693719020e-02,
0.576478008199711142954e-02,
0.579403701652197628421e-02,
0.582300929311348057702e-02,
0.585169437382850155033e-02,
0.588008975062788803205e-02,
0.590819294541511788161e-02,
0.593600151007459827614e-02,
0.596351302650963502011e-02,
0.599072510668009471472e-02,
0.601763539263978131522e-02,
0.604424155657354634589e-02,
0.607054130083414983949e-02,
0.609653235797888692923e-02,
0.612221249080599294931e-02,
0.614757949239083790214e-02,
0.617263118612191922727e-02,
0.619736542573665996342e-02,
0.622178009535701763157e-02,
0.624587310952490748541e-02,
0.626964241323744217671e-02,
0.629308598198198836688e-02,
0.631620182177103938227e-02,
0.633898796917690165912e-02,
0.636144249136619145314e-02,
0.638356348613413709795e-02,
0.640534908193868098342e-02,
0.642679743793437438922e-02,
0.644790674400605734710e-02,
0.646867522080231481688e-02,
0.648910111976869964292e-02,
0.650918272318071200827e-02,
0.652891834417652442012e-02,
0.654830632678944064054e-02,
0.656734504598007641819e-02,
0.658603290766824937794e-02,
0.660436834876456498276e-02,
0.662234983720168509457e-02,
0.663997587196526532519e-02,
0.665724498312454708217e-02,
0.667415573186258997654e-02,
0.669070671050613006584e-02,
0.670689654255504925648e-02,
0.672272388271144108036e-02,
0.673818741690825799086e-02,
0.675328586233752529078e-02,
0.676801796747810680683e-02,
0.678238251212300746082e-02,
0.679637830740619795480e-02,
0.681000419582894688374e-02,
0.682325905128564571420e-02,
0.683614177908911221841e-02,
0.684865131599535812903e-02,
0.686078663022780697951e-02,
0.687254672150094831613e-02,
0.688393062104341470995e-02,
0.689493739162046825872e-02,
0.690556612755588354803e-02,
0.691581595475321433825e-02,
0.692568603071643155621e-02,
0.693517554456992049848e-02,
0.694428371707782549438e-02,
0.695300980066273063177e-02,
0.696135307942366551493e-02,
0.696931286915342540213e-02,
0.697688851735519545845e-02,
0.698407940325846925786e-02,
0.699088493783425207545e-02,
0.699730456380953992594e-02,
0.700333775568106572820e-02,
0.700898401972830440494e-02,
0.701424289402572916425e-02,
0.701911394845431165171e-02,
0.702359678471225911031e-02,
0.702769103632498213858e-02,
0.703139636865428709508e-02,
0.703471247890678765907e-02,
0.703763909614153052319e-02,
0.704017598127683066242e-02,
0.704232292709631209597e-02,
0.704407975825415053266e-02,
0.704544633127951476780e-02,
0.704642253458020417748e-02,
0.704700828844548013730e-02,
]
)
weights = np.concatenate([s, np.array([0.704720354504808967346e-02]), s[::-1]])
return C1Scheme(f"Gauss-Patterson {index}", degree, weights, points, source)
def _get_points(index):
def _pm(a):
a = np.asarray(a)
return np.concatenate([-a[::-1], +a])
if index == 0:
return np.array([0.0])
if index == 1:
new_points = [0.7745966692414834]
elif index == 2:
new_points = [0.4342437493468025, 0.9604912687080203]
elif index == 3:
new_points = [
0.2233866864289669,
0.6211029467372264,
0.8884592328722570,
0.9938319632127550,
]
elif index == 4:
new_points = [
0.1124889431331866,
0.3311353932579768,
0.5313197436443756,
0.7024962064915271,
0.8367259381688688,
0.9296548574297401,
0.9815311495537401,
0.9990981249676676,
]
elif index == 5:
new_points = [
0.5634431304659279e-01,
0.1682352515522075,
0.2777498220218243,
0.3833593241987304,
0.4836180269458411,
0.5771957100520458,
0.6629096600247806,
0.7397560443526947,
0.8069405319502176,
0.8639079381936905,
0.9103711569570043,
0.9463428583734029,
0.9721828747485818,
0.9886847575474295,
0.9972062593722220,
0.9998728881203576,
]
elif index == 6:
new_points = [
0.2818464894974569e-01,
0.8445404008371088e-01,
0.1404242331525602,
0.1958975027111002,
0.2506787303034832,
0.3045764415567140,
0.3574038378315322,
0.4089798212298887,
0.4591300119898323,
0.5076877575337166,
0.5544951326319325,
0.5994039302422429,
0.6422766425097595,
0.6829874310910792,
0.7214230853700989,
0.7574839663805136,
0.7910849337998483,
0.8221562543649804,
0.8506444947683502,
0.8765134144847053,
0.8997448997769401,
0.9203400254700124,
0.9383203977795929,
0.9537300064257611,
0.9666378515584165,
0.9771415146397057,
0.9853714995985203,
0.9914957211781061,
0.9957241046984072,
0.9983166353184074,
0.9995987996719107,
0.9999824303548916,
]
elif index == 7:
new_points = [
0.014093886410782462614e00,
0.042269164765363603212e00,
0.070406976042855179063e00,
0.098482396598119202090e00,
0.12647058437230196685e00,
0.15434681148137810869e00,
0.18208649675925219825e00,
0.20966523824318119477e00,
0.23705884558982972721e00,
0.26424337241092676194e00,
0.29119514851824668196e00,
0.31789081206847668318e00,
0.34430734159943802278e00,
0.37042208795007823014e00,
0.39621280605761593918e00,
0.42165768662616330006e00,
0.44673538766202847374e00,
0.47142506587165887693e00,
0.49570640791876146017e00,
0.51955966153745702199e00,
0.54296566649831149049e00,
0.56590588542365442262e00,
0.58836243444766254143e00,
0.61031811371518640016e00,
0.63175643771119423041e00,
0.65266166541001749610e00,
0.67301883023041847920e00,
0.69281376977911470289e00,
0.71203315536225203459e00,
0.73066452124218126133e00,
0.74869629361693660282e00,
0.76611781930376009072e00,
0.78291939411828301639e00,
0.79909229096084140180e00,
0.81462878765513741344e00,
0.82952219463740140018e00,
0.84376688267270860104e00,
0.85735831088623215653e00,
0.87029305554811390585e00,
0.88256884024734190684e00,
0.89418456833555902286e00,
0.90514035881326159519e00,
0.91543758715576504064e00,
0.92507893290707565236e00,
0.93406843615772578800e00,
0.94241156519108305981e00,
0.95011529752129487656e00,
0.95718821610986096274e00,
0.96364062156981213252e00,
0.96948465950245923177e00,
0.97473445975240266776e00,
0.97940628167086268381e00,
0.98351865757863272876e00,
0.98709252795403406719e00,
0.99015137040077015918e00,
0.99272134428278861533e00,
0.99483150280062100052e00,
0.99651414591489027385e00,
0.99780535449595727456e00,
0.99874561446809511470e00,
0.99938033802502358193e00,
0.99976049092443204733e00,
0.99994399620705437576e00,
0.99999759637974846462e00,
]
else:
assert index == 8
new_points = [
0.704713845933674648514e-02,
0.211398533783310883350e-01,
0.352278828084410232603e-01,
0.493081047908686267156e-01,
0.633773999173222898797e-01,
0.774326523498572825675e-01,
0.914707508403553909095e-01,
0.105488589749541988533,
0.119483070065440005133,
0.133451100421161601344,
0.147389598111939940054,
0.161295490111305257361,
0.175165714086311475707,
0.188997219411721861059,
0.202786968183064697557,
0.216531936228472628081,
0.230229114119222177156,
0.243875508178893021593,
0.257468141491069790481,
0.271004054905512543536,
0.284480308042725577496,
0.297893980296857823437,
0.311242171836871800300,
0.324522004605921855207,
0.337730623318886219621,
0.350865196458001209011,
0.363922917266549655269,
0.376901004740559344802,
0.389796704618470795479,
0.402607290368737092671,
0.415330064175321663764,
0.427962357921062742583,
0.440501534168875795783,
0.452944987140767283784,
0.465290143694634735858,
0.477534464298829155284,
0.489675444004456155436,
0.501710613415391878251,
0.513637539655988578507,
0.525453827336442687395,
0.537157119515795115982,
0.548745098662529448608,
0.560215487612728441818,
0.571566050525742833992,
0.582794593837318850840,
0.593898967210121954393,
0.604877064481584353319,
0.615726824608992638014,
0.626446232611719746542,
0.637033320510492495071,
0.647486168263572388782,
0.657802904699713735422,
0.667981708447749702165,
0.678020808862644517838,
0.687918486947839325756,
0.697673076273711232906,
0.707282963891961103412,
0.716746591245747095767,
0.726062455075389632685,
0.735229108319491547663,
0.744245161011347082309,
0.753109281170558142523,
0.761820195689839149173,
0.770376691217076824278,
0.778777615032822744702,
0.787021875923539422170,
0.795108445051100526780,
0.803036356819268687782,
0.810804709738146594361,
0.818412667287925807395,
0.825859458783650001088,
0.833144380243172624728,
0.840266795261030442350,
0.847226135891580884381,
0.854021903545468625813,
0.860653669904299969802,
0.867121077859315215614,
0.873423842480859310192,
0.879561752026556262568,
0.885534668997285008926,
0.891342531251319871666,
0.896985353188316590376,
0.902463227016165675048,
0.907776324115058903624,
0.912924896514370590080,
0.917909278499077501636,
0.922729888363349241523,
0.927387230329536696843,
0.931881896650953639345,
0.936214569916450806625,
0.940386025573669721370,
0.944397134685866648591,
0.948248866934137357063,
0.951942293872573589498,
0.955478592438183697574,
0.958859048710200221356,
0.962085061904651475741,
0.965158148579915665979,
0.968079947017759947964,
0.970852221732792443256,
0.973476868052506926773,
0.975955916702011753129,
0.978291538324758539526,
0.980486047876721339416,
0.982541908851080604251,
0.984461737328814534596,
0.986248305913007552681,
0.987904547695124280467,
0.989433560520240838716,
0.990838611958294243677,
0.992123145530863117683,
0.993290788851684966211,
0.994345364356723405931,
0.995290903148810302261,
0.996131662079315037786,
0.996872143485260161299,
0.997517116063472399965,
0.998071634524930323302,
0.998541055697167906027,
0.998931050830810562236,
0.999247618943342473599,
0.999497112467187190535,
0.999686286448317731776,
0.999822363679787739196,
0.999913081144678282800,
0.999966730098486276883,
0.999992298136257588028,
0.999999672956734384381,
]
return np.concatenate([_get_points(index - 1), _pm(new_points)])
def _get_weights(pts):
"""Given a number of points in [-1, 1], according to
T.N.L. Patterson,
On some Gauss and Lobatto based integration formulae,
Math. Comp. 22 (1968), 877-881,
https://doi.org/10.2307/2004589,
one can compute the corresponding weights. One reads there:
> Thus the weights of an n-point integration formula [...] are given by
>
> omega_i = int_{-1}^{1} L_i(x) dx,
>
> (where L_i is the Lagrange polynomial for the point x_i). These weights can be
> evaluated exactly in a numerically stable fashion using a Gauss formula with n/2
    > points when n is even and (n + 1)/2 points when n is odd.
"""
n = len(pts)
# Gauss-Legendre of order k integrates polynomials of degree 2*k-1 exactly. L has
# degree n-1, so k needs to be n/2 if n is even, and (n+1)/2 if n is odd.
    k = n // 2 if n % 2 == 0 else (n + 1) // 2
return np.array(
[
gauss_legendre(k).integrate(
# Normalized Lagrange polynomial: Degree n-1, 0 at all x_j, 1 at x_i.
lambda x: np.prod(
[(x - pts[j]) / (pts[i] - pts[j]) for j in range(n) if j != i],
axis=0,
).reshape(np.asarray(x).shape),
np.array([-1, 1]),
)
for i in range(n)
]
)
| mit | 722,025,886,675,733,100 | 38.472527 | 112 | 0.556607 | false |
jds2001/sos | sos/plugins/etcd.py | 1 | 2227 | # Copyright (C) 2015 Red Hat, Inc. Neependra Khare <[email protected]>
# Copyright (C) 2015 Red Hat, Inc. Bryn M. Reeves <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
class etcd(Plugin, RedHatPlugin):
"""etcd plugin
"""
packages = ('etcd',)
cmd = 'etcdctl'
def setup(self):
etcd_url = self.get_etcd_url()
self.add_copy_spec('/etc/etcd')
subcmds = [
'--version',
'member list',
'cluster-health',
'ls --recursive',
]
        self.add_cmd_output(['%s %s' % (self.cmd, sub) for sub in subcmds])
urls = [
'/v2/stats/leader',
'/v2/stats/self',
'/v2/stats/store',
]
if etcd_url:
self.add_cmd_output(['curl -s %s%s' % (etcd_url, u) for u in urls])
self.add_cmd_output("ls -lR /var/lib/etcd/")
def get_etcd_url(self):
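        # Illustrative /etc/etcd/etcd.conf entry (value invented for the example):
        #   ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
        # The split on '=' and quote stripping below return http://0.0.0.0:2379.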
try:
with open('/etc/etcd/etcd.conf', 'r') as ef:
for line in ef:
if line.startswith('ETCD_LISTEN_CLIENT_URLS'):
return line.split('=')[1].replace('"', '').strip()
# If we can't read etcd.conf, assume defaults by etcd version
except:
ver = self.policy().package_manager.get_pkg_list()['etcd']
ver = ver['version'][0]
if ver == '2':
return 'http://localhost:4001'
if ver == '3':
return 'http://localhost:2379'
# vim: et ts=5 sw=4
| gpl-2.0 | 9,004,921,074,959,026,000 | 30.814286 | 79 | 0.585092 | false |
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_point.py | 1 | 5599 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
cc_plugin_ncei/ncei_point.py
'''
from compliance_checker.base import BaseCheck
from cc_plugin_ncei.ncei_base import TestCtx, NCEI1_1Check, NCEI2_0Check
from cc_plugin_ncei import util
class NCEIPointBase(BaseCheck):
_cc_spec = 'ncei-point'
valid_feature_types = [
'station',
'point'
]
def check_dimensions(self, dataset):
'''
        Checks that the feature types of this dataset are consistent with a point dataset
'''
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are point feature types')
t = util.get_time_variable(dataset)
# Exit prematurely
if not t:
required_ctx.assert_true(False, 'A dimension representing time is required for point feature types')
return required_ctx.to_result()
t_dims = dataset.variables[t].dimensions
        o = t_dims[0] if t_dims else None
        message = '{} must be a valid point feature type. It must have dimensions of ({}), and all coordinates must have dimensions of ({})'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_point(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable, o, o)
)
return required_ctx.to_result()
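    # Illustrative layout (hypothetical variable names): if the time variable has
    # dimensions (obs,), then every coordinate and geophysical variable must also
    # be shaped (obs,) for util.is_point to accept it, e.g. time(obs), lat(obs),
    # lon(obs), temperature(obs).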
class NCEIPoint1_1(NCEI1_1Check, NCEIPointBase):
register_checker = True
_cc_spec_version = '1.1'
_cc_description = (
'This test checks the selected file against the NCEI netCDF Point template version 1.1 '
'(found at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/point.cdl). The NCEI '
'version 1.1 templates are based on “feature types”, as identified by Unidata and CF, '
'and conform to ACDD version 1.0 and CF version 1.6. You can find more information about '
'the version 1.1 templates at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/. This '
'test is specifically for the Point feature type which is typically used for a single '
'data point with one or more recorded observations that have no temporal or spatial '
'relationship (where each observation equals one point in time and space).')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/point.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.1.0'
valid_templates = [
"NODC_NetCDF_Point_Template_v1.1"
]
@classmethod
def beliefs(cls):
'''
Not applicable for gliders
'''
return {}
def check_required_attributes(self, dataset):
'''
Verifies that the dataset contains the NCEI required and highly recommended global attributes
'''
results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Point')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Point',
'cdm_data_type attribute must be set to Point'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'point',
'featureType attribute must be set to point'
)
results.append(required_ctx.to_result())
return results
class NCEIPoint2_0(NCEI2_0Check, NCEIPointBase):
register_checker = True
_cc_spec_version = '2.0'
_cc_description = (
        'This test checks the selected file against the NCEI netCDF Point template '
'version 2.0 (found at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/point.cdl). The NCEI '
'version 2.0 templates are based on “feature types”, as identified by Unidata and CF, and '
'conform to ACDD version 1.3 and CF version 1.6. You can find more information about the '
'version 2.0 templates at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/. This test is '
'specifically for the Point feature type which is typically used for a single data point with '
'one or more recorded observations that have no temporal or spatial relationship (where each '
'observation equals one point in time and space).')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/point.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.3.0'
valid_templates = [
"NCEI_NetCDF_Point_Template_v2.0"
]
def check_required_attributes(self, dataset):
'''
Verifies that the dataset contains the NCEI required and highly recommended global attributes
'''
results = []
        required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Point')
required_ctx.assert_true(
getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Point',
'cdm_data_type attribute must be set to Point'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'point',
'featureType attribute must be set to point'
)
results.append(required_ctx.to_result())
return results
| apache-2.0 | 5,135,783,404,558,934,000 | 41.679389 | 145 | 0.635664 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_express_route_cross_connection_peerings_operations.py | 1 | 22280 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations:
"""ExpressRouteCrossConnectionPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
cross_connection_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRouteCrossConnectionPeeringList"]:
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> "_models.ExpressRouteCrossConnectionPeering":
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> "_models.ExpressRouteCrossConnectionPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionPeering"]:
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
| mit | 5,977,565,763,108,546,000 | 50.934732 | 236 | 0.660189 | false |
xxd3vin/spp-sdk | tools/script/blender/x3d2ocs.py | 1 | 7210 | import sys,os,bpy
import urllib
from urllib import *
import urllib.request
from xml.etree import ElementTree
print("run start")
render_url = "http://192.168.3.78:8088/content/test/monkey"
x3d_url = "http://192.168.3.78:8088/content/test/monkey.x3d"
x3d_scene_name = "monkey"
def dowload_x3d(x3d_url):
print("start download...")
urllib.request.urlretrieve(x3d_url,("C:/rendertmp/" + x3d_scene_name + ".x3d"))
print("download end...")
def setmeshname():
for obj in bpy.data.objects:
if(obj.type == "MESH"):
obj.data.name = obj.name
def load_x3d():
bpy.ops.import_scene.x3d(filepath="C:\\rendertmp\\" + x3d_scene_name + ".x3d")
bpy.ops.export_scene.obj(filepath="C:\\rendertmp\\" + x3d_scene_name + ".obj")
# if __name__=="__main__":
# main(sys.argv[1:])
def load_objnode(ocsnodes):
meshtxml = ElementTree.parse(r"c:\\mesht.xml")
ocsmeshnode = meshtxml.getiterator("Node")[0]
inputnodepins = ocsmeshnode.getiterator("inputnodepins")[0]
matindex = 0
for imat in bpy.data.materials:
NodePin = ElementTree.Element("NodePin")
typename = ElementTree.Element("typename")
typename.text = imat.name
NodePin.append(typename)
id = ElementTree.Element("id")
#print(dir(imat))
#print(imat.pass_index)
id.text = ("%s" % matindex)
matindex = matindex + 1
NodePin.append(id)
pintype = ElementTree.Element("pintype")
pintype.text = "20005"
NodePin.append(pintype)
hasinternalnodegraph = ElementTree.Element("hasinternalnodegraph")
hasinternalnodegraph.text = "false"
NodePin.append(hasinternalnodegraph)
inputnodepins.append(NodePin)
matindex = 0
for imat in bpy.data.materials:
#print(imat.name)
matname = imat.name
#print(matname.find('.'))
if matname.find('.') > 0:
matname = matname[0 : (matname.find('.'))]
#print(matname)
tmpmatxml = ElementTree.parse(r"c:\rendertmp\\"+ matname +".ocm")
tmpmatnode = tmpmatxml.getiterator("OCS_1_0_23_Macro")[0].getiterator("Node")[0]
tmpmatnode.getiterator("name")[0].text = imat.name
#set id
if tmpmatnode.getiterator("typename")[0].text == "material macro":
tmpmatnode.getiterator("id")[0].text = ("%s" % (matindex + 4))
matindex = matindex + 1
ocsnodes.append(tmpmatnode)
ocsmeshnode.getiterator("name")[0].text = x3d_scene_name + ".obj"
ocsmeshnode.getiterator("linkedfilename")[0].text = x3d_scene_name + ".obj"
ocsnodes.append(ocsmeshnode)
def export_ocs():
ocsxml = ElementTree.parse(r"c:\\t1.ocs")
scenert = ocsxml.getiterator("OCS_1_0_23_Scene")
#print(scenert[0].tag)
#print(dir(ocsxml))
#get scene node
if scenert[0].tag == "OCS_1_0_23_Scene" :
snode = scenert[0].getiterator("Node")
#print(snode[0].tag)
if snode[0].tag == "Node" :
nname = snode[0].getiterator("name")
#print(nname[0].tag)
#print(nname[0].text)
childgraph = snode[0].getiterator("childgraph")
NodeGraph = childgraph[0].getiterator("NodeGraph")
NodeGraphnodes = NodeGraph[0].getiterator("nodes")[0]
load_objnode(NodeGraphnodes)
# #get camera pos node
# for inode in NodeGraphnodes.getiterator("Node") :
# inodename = inode.getiterator("name")[0].text
# #get Preview Configuration node
# if inodename == "Preview Configuration":
# pvcnodes = inode.getiterator("Node")
# for ipnode in pvcnodes :
# ipnodename = ipnode.getiterator("name")[0].text
# #print(ipnodename)
# #get Mesh Preview Camera node
# if ipnodename == "Mesh Preview Camera" :
# nodepins = ipnode.getiterator("NodePin")
# for inp in nodepins :
# inpname = inp.getiterator("typename")[0].text
# if inpname == "pos" :
# #print(inp.getiterator("valuexyz")[0].text)
# #set cam pos
# campos = cameranode.getiterator("position")[0].attrib["x"] + " " + cameranode.getiterator("position")[0].attrib["y"] + " " + cameranode.getiterator("position")[0].attrib["z"]
# #print(campos)
# inp.getiterator("valuexyz")[0].text = campos
# if inpname == "target" :
# camfocus = cameranode.getiterator("focus")[0].attrib["x"] + " " + cameranode.getiterator("focus")[0].attrib["y"] + " " + cameranode.getiterator("focus")[0].attrib["z"]
# #print(campos)
# inp.getiterator("valuexyz")[0].text = camfocus
# if inpname == "autofocus" :
# inp.getiterator("value")[0].text = "true"
# if ipnodename == "Mesh Preview Resolution" :
# valuexy = ipnode.getiterator("valuexy")[0]
# #print(ipnode.getiterator("typename")[0].text)
# valuexy.text = cameranode.getiterator("width")[0].attrib["value"] + " " + cameranode.getiterator("height")[0].attrib["value"]
# loadobj(NodeGraphnodes)
# #NodeGraphnodes.appendChild(addnode)
#set line
nodepinconnections = ocsxml.getiterator("nodepinconnections")[len(ocsxml.getiterator("nodepinconnections")) - 1]
matindex = 0
for imat in bpy.data.materials:
nodepinconnection = ElementTree.Element("nodepinconnection")
sourceid = ElementTree.Element("sourceid")
sourceid.text = ("%s" % (matindex + 4))
nodepinconnection.append(sourceid)
sourcepinid = ElementTree.Element("sourcepinid")
sourcepinid.text = "0"
nodepinconnection.append(sourcepinid)
destid = ElementTree.Element("destid")
destid.text = "3"
nodepinconnection.append(destid)
destpinid = ElementTree.Element("destpinid")
destpinid.text = ("%s" % (matindex))
matindex = matindex + 1
nodepinconnection.append(destpinid)
nodepinconnections.append(nodepinconnection)
ocsxml.write(r"c:\\rendertmp\\t1t.ocs","utf-8")
dowload_x3d(x3d_url)
load_x3d()
export_ocs()
print("run end") | mit | 8,665,271,637,099,151,000 | 35.979487 | 212 | 0.520527 | false |
nmc-probe/emulab-nome | tbsetup/plab/libdslice/dslice/nodemgrproxy.py | 1 | 5675 | """
Copyright (c) 2002 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
EXPORT LAWS: THIS LICENSE ADDS NO RESTRICTIONS TO THE EXPORT LAWS OF
YOUR JURISDICTION. It is licensee's responsibility to comply with any
export regulations applicable in licensee's jurisdiction. Under
CURRENT (May 2000) U.S. export regulations this software is eligible
for export from the U.S. and can be downloaded by or otherwise
exported or reexported worldwide EXCEPT to U.S. embargoed destinations
which include Cuba, Iraq, Libya, North Korea, Iran, Syria, Sudan,
Afghanistan and any other country to which the U.S. has embargoed
goods and services.
DESCRIPTION: Proxy class that implements client end of XML-RPC
communication with a node manager.
AUTHOR: Brent Chun ([email protected])
$Id: nodemgrproxy.py,v 1.1 2003-08-19 17:17:22 aclement Exp $
"""
from xmlrpclib import ServerProxy
from M2Crypto.m2xmlrpclib import SSL_Transport
from sslctx import clictxinit
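# Minimal usage sketch (added illustration; host, ports and key paths are made up):
#   proxy = nodemgrproxy("node.example.org", 800, sslport=801,
#                        key="node.key", cert="node.cert", cacert="ca.cert")
#   print(proxy.getconfig())        # plain XML-RPC call
#   proxy.newlease(ticketdata)      # SSL-authenticated call using the client cert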
class nodemgrproxy:
def __init__(self, host, port, sslport=None, key=None, cert=None, cacert=None):
self.host = host
self.port = port
self.sslport = sslport
self.key = key
self.cert = cert
self.cacert = cacert
def getconfig(self):
s = ServerProxy("http://%s:%d" % (self.host, self.port))
return s.getconfig()
def getleases(self):
s = ServerProxy("http://%s:%d" % (self.host, self.port))
return s.getleases()
def getslivers(self):
s = ServerProxy("http://%s:%d" % (self.host, self.port))
return s.getslivers()
def getprinciple(self, slice):
s = ServerProxy("http://%s:%d" % (self.host, self.port))
params = { "slice" : slice }
return s.getprinciple(params)
def getsshkeys(self, slice):
s = ServerProxy("http://%s:%d" % (self.host, self.port))
params = { "slice" : slice }
return s.getsshkeys(params)
def newlease(self, ticketdata):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "ticketdata" : ticketdata }
return s.newlease(params)
def newvm(self, leasedata, privatekey, publickey):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "leasedata" : leasedata, "privatekey" : privatekey,
"publickey" : publickey }
return s.newvm(params)
def newleasevm(self, ticketdata, privatekey, publickey):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "ticketdata" : ticketdata, "privatekey" : privatekey,
"publickey" : publickey }
return s.newleasevm(params)
def deletelease(self, slice):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "slice" : slice }
return s.deletelease(params)
def renewlease(self, slice):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "slice" : slice }
return s.renewlease(params)
def addkey(self, slice, key):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "slice" : slice, "key" : key }
return s.addkey(params)
def delkey(self, slice, key):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "slice" : slice, "key" : key }
return s.delkey(params)
def nukekeys(self, slice):
ctx = clictxinit(self.cert, self.key, self.cacert)
s = ServerProxy("https://%s:%d" % (self.host, self.sslport), SSL_Transport(ctx))
params = { "slice" : slice }
return s.nukekeys(params)
| agpl-3.0 | -1,721,309,583,470,177,000 | 41.350746 | 88 | 0.672423 | false |
robcarver17/pysystemtrade | sysexecution/order_stacks/contract_order_stack.py | 1 | 2783 | import datetime
from copy import copy
from syscore.objects import missing_order
from sysexecution.order_stacks.order_stack import orderStackData, missingOrder
from sysexecution.trade_qty import tradeQuantity
from sysexecution.orders.contract_orders import contractOrder
class contractOrderStackData(orderStackData):
def _name(self):
return "Contract order stack"
def add_controlling_algo_ref(self, order_id: int,
control_algo_ref: str):
"""
:param order_id: int
:param control_algo_ref: str or None
:return:
"""
if control_algo_ref is None:
return self.release_order_from_algo_control(order_id)
existing_order = self.get_order_with_id_from_stack(order_id)
if existing_order is missing_order:
error_msg ="Can't add controlling ago as order %d doesn't exist" % order_id
self.log.warn(error_msg)
raise missingOrder(error_msg)
try:
modified_order = copy(existing_order)
modified_order.add_controlling_algo_ref(control_algo_ref)
self._change_order_on_stack(order_id, modified_order)
except Exception as e:
log = existing_order.log_with_attributes(self.log)
error_msg = "%s couldn't add controlling algo %s to order %d" % \
(str(e), control_algo_ref, order_id)
log.warn(error_msg)
raise Exception(error_msg)
def release_order_from_algo_control(self, order_id: int):
existing_order = self.get_order_with_id_from_stack(order_id)
if existing_order is missing_order:
error_msg ="Can't add controlling ago as order %d doesn't exist" % order_id
self.log.warn(error_msg)
raise missingOrder(error_msg)
order_is_not_controlled = not existing_order.is_order_controlled_by_algo()
if order_is_not_controlled:
# No change required
return None
try:
modified_order = copy(existing_order)
modified_order.release_order_from_algo_control()
self._change_order_on_stack(order_id, modified_order)
except Exception as e:
log = existing_order.log_with_attributes(self.log)
error_msg = "%s couldn't remove controlling algo from order %d" % \
(str(e), order_id)
log.warn(error_msg)
raise Exception(error_msg)
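    # Usage sketch (illustrative; assumes a concrete stack instance and an existing
    # order id supplied by the surrounding pysystemtrade code):
    #
    #   stack.add_controlling_algo_ref(order_id, "algo_market")  # lock order to an algo
    #   stack.release_order_from_algo_control(order_id)          # release when done
    #
    # add_controlling_algo_ref raises missingOrder for an unknown order id, and
    # passing control_algo_ref=None simply releases the order instead.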
def get_order_with_id_from_stack(self, order_id: int) -> contractOrder:
# probably will be overriden in data implementation
# only here so the appropriate type is shown as being returned
order = self.stack.get(order_id, missing_order)
return order
| gpl-3.0 | 2,876,840,782,749,514,000 | 37.123288 | 87 | 0.621272 | false |
corvis/heats-roompi | src/common/bootstrap.py | 1 | 7736 | import sys
import gc
import logging
import os.path
from typing import List, Tuple
from common import parse_utils
from common.drivers import ModuleDiscoveryDriver
from common.model import Module, PipedEvent
from common.utils import int_to_hex4str
from modules import StandardModulesOnlyDriver
from .errors import ConfigValidationError, InvalidDriverError
from .core import ApplicationManager
MODULE_DISCOVERY_DRIVER = ModuleDiscoveryDriver.typeid()
__logger = logging.getLogger('Bootstrap')
def bootstrap(config: dict) -> ApplicationManager:
application = ApplicationManager()
# Save instance config
__logger.info('Reading config file')
__save_instance_config(config, application)
__load_context_path(application)
__logger.info('Config captured')
gc.collect()
# Load drivers
__logger.info('Loading drivers')
__load_drivers(config, application)
__logger.info('Drivers loaded')
gc.collect()
# Discover modules
module_discovery_driver = application.get_driver(MODULE_DISCOVERY_DRIVER)
module_discovery_driver.discover_modules(application.get_module_registry())
# Instantiate components
devices_and_configs = __instantiate_devices(config, application)
# Build pipe table
__build_pipes(devices_and_configs, application)
# Initialize components
# Run event handling loop
application.thread_manager.request_thread('EventLoop', application.event_loop,
step_interval=application.EVENT_HANDLING_LOOP_INTERVAL)
application.thread_manager.request_thread('BgLoop', application.background_tasks_loop,
step_interval=application.BG_TASK_HANDLING_LOOP_INTERVAL)
return application
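# Illustrative shape of the config dict consumed by bootstrap() (derived from the
# parsers below; the module, device and event names are made-up examples, and the
# exact link-string format is whatever parse_utils.parse_link_string accepts):
#
#   config = {
#       'instance': {},                         # presence enables instance settings;
#       'id': 'node-1',                         # ...id and context_path are read from
#       'context_path': ['/opt/roompi/ext'],    # ...the top level of the config
#       'drivers': ['my_pkg.MyDiscoveryDriver'],
#       'devices': {
#           'button1': {
#               'module_name': 'Button',        # mandatory for every device
#               'pipe': {'pressed': ['relay1.toggle']},
#           },
#           'relay1': {'module_name': 'Relay'},
#       },
#   }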
def __save_instance_config(config: dict, application: ApplicationManager):
if 'instance' in config:
settings = application.get_instance_settings()
settings.id = config.get('id')
# Context Path
for path in config.get('context_path', []):
if isinstance(path, str):
if not os.path.exists(path):
raise ConfigValidationError('instance/context_path', 'path ' + path + ' doesn\'t exist')
settings.context_path.append(path)
else:
raise ConfigValidationError('instance/context_path', 'Context path should be the list of strings')
def __load_context_path(application: ApplicationManager):
context_path = application.get_instance_settings().context_path
for p in context_path:
sys.path.append(p)
def __load_drivers(config: dict, application: ApplicationManager):
drivers = config.get('drivers', [])
for d in drivers:
driver_class_name = None
if isinstance(d, str):
driver_class_name = d
elif isinstance(d, dict) and 'class' in d:
driver_class_name = d.get('class')
if driver_class_name is None:
raise ConfigValidationError('drivers', 'Section is invalid')
application.register_driver(driver_class_name)
    # Check if there is a module discovery driver loaded. If not, load the default implementation
try:
application.get_driver(MODULE_DISCOVERY_DRIVER)
except InvalidDriverError:
__logger.info("Since no module discovery driver provided only standard modules might be used")
application.register_driver(StandardModulesOnlyDriver)
def __instantiate_devices(config: dict, application: ApplicationManager) -> List[Tuple[Module, dict]]:
devices_with_config = []
devices = config.get('devices', {})
counter = 0x0200
for name, device_def in devices.items():
        if not (isinstance(device_def, dict) and 'module_name' in device_def):
raise ConfigValidationError('devices/' + name, 'Should be dictionary containing mandatory module_name key')
module_registry = application.get_module_registry()
instance = module_registry.create_module_instance(
application,
typeid=module_registry.find_module_by_name(device_def.get('module_name')).typeid(),
instance_name=name,
instance_id=counter + 1
)
try:
# Parse and set parameters
for param_def in instance.PARAMS:
if param_def.name in device_def:
val = device_def[param_def.name]
if param_def.parser is not None:
val = param_def.parser.parse(val, application, 'devices/{}/{}'.format(name, param_def.name))
param_def.validate(val)
setattr(instance, param_def.name, val)
elif param_def.is_required:
raise ConfigValidationError('devices/{}/{}'.format(name, param_def.name),
'Parameter {} is required'.format(param_def.name))
# Run validation of the overall device
instance.validate()
instance.on_initialized()
except Exception as e:
raise ConfigValidationError("devices/" + name, "Invalid device configuration: " + str(e), e)
application.register_device(instance)
devices_with_config.append((instance, device_def))
__logger.debug("Initialized device {}({}). Type: {}({})".format(int_to_hex4str(instance.id), instance.name,
int_to_hex4str(instance.typeid()),
instance.type_name()))
counter += 1
return devices_with_config
def __build_pipes(devices_and_configs: List[Tuple[Module, dict]], application: ApplicationManager):
for pair in devices_and_configs:
device, device_config = pair
pipe_data = device_config.get('pipe')
if pipe_data is not None:
for event_name, link_data in pipe_data.items():
event = device.get_event_by_name(event_name)
if event is None:
raise ConfigValidationError('devices.{}.pipe.{}'.format(device.name, event),
'Unknown event name: ' + event_name)
# If link data is just string we will treat it as single item list
if isinstance(link_data, str):
link_data = [link_data]
for link_string in link_data:
linked_device_name, action_name = parse_utils.parse_link_string(link_string)
linked_device = application.get_device_by_name(linked_device_name)
if linked_device is None:
raise ConfigValidationError('devices.{}.pipe.{}'.format(device.name, event),
'Linked device: ' + linked_device_name + " doesn't exist")
action = linked_device.get_action_by_name(action_name)
if action is None:
raise ConfigValidationError('devices.{}.pipe.{}'.format(device.name, event),
'Action {} is not supported by {}'.format(action_name,
linked_device_name))
piped_event = PipedEvent(
declared_in=device,
target=linked_device,
event=event,
action=action
)
application.register_pipe(piped_event)
__logger.info('Piped event "{}" from #{} -> {}'.format(event_name, device.name, link_string))
| gpl-2.0 | 515,392,509,568,603,970 | 46.753086 | 119 | 0.595786 | false |
monkeymia/js | mmdb/Test_DB_Interface.py | 1 | 13315 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# https://github.com/monkeymia/
#
# Copyright (c) 2014, monkeymia, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library
#
# To make it work in Ubuntu:
# sudo apt-get install python-pip
# sudo apt-get install python-dev
# sudo pip install coverage
# sudo pip install nose
# sudo pip install flake8
#
# run tests with:
# nosetests --with-coverage Test*.py
# flake8 *.py --show-source --statistics --select=E
#
# run tests with html output:
# nosetests --with-coverage --cover-html Test*.py
#
# Delete trailing whitespaces: sed -i.bak 's/[[:blank:]]*$//' "$1"
#
#
import DB_Interface
import unittest
import StringIO
class Test_DB_Interface(unittest.TestCase):
db_name = "unit_test_db_interface"
table_name = "table_test_db_interface"
def setUp(self):
self.db = DB_Interface.DB_Interface()
self.req = StringIO.StringIO()
res = self.db.clear_dbs()
self.assertTrue(res)
res = self.db.mk_db(self.db_name)
self.assertTrue(res)
res = self.db.use_db(self.db_name)
self.assertTrue(res)
col = []
col.append({"type": "INT", "name": "col1"})
col.append({"type": "INT", "name": "col2"})
col.append({"type": "INT", "name": "col3"})
res = self.db.mk_table(self.table_name, col)
self.assertTrue(res)
# end def
def test_close(self):
self.db.close()
self.assertEqual(self.db.__class__._connection, None)
# end def
def test_clear(self):
self.db.clear_dbs()
result = self.db.ls_dbs()
self.assertTrue(result.rowcount > 0)
self.assertTrue(result)
# end def
def test_del_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertTrue(res)
res = self.db.new_row(self.table_name, "foo1", test_vals)
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=1,rc=1,le=0")
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo1")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_del_row_all(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertTrue(res)
res = self.db.new_row(self.table_name, "foo1", test_vals)
self.assertTrue(res)
res = self.db.del_row(self.table_name, None, del_all=True)
self.assertEqual(str(res), "lc=0,lr=0,rc=2,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_del_row_e1(self):
# check what happens if no row
self.assertRaises(
NotImplementedError, self.db.del_row, self.table_name, "foo")
# end def
def test_get_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue("foo" in res.rows[0])
self.assertTrue(res)
res = self.db.get_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=1,rc=1,le=0")
self.assertTrue(res)
res = self.db.get_row(self.table_name, "foo", cols=["col1"])
self.assertEqual(str(res), "lc=1,lr=1,rc=1,le=0")
self.assertTrue(res)
# end def
def test_get_row_e1(self):
# check what happens if no row
res = self.db.get_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertFalse(res)
# end def
def test_has_row_e1(self):
# check what happens if no row
res = self.db.has_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertFalse(res)
# end def
def test_ls_layouts_e1(self):
self.db.extension_json = "invalid_unrealistic_extension"
res = self.db.ls_layouts()
self.assertEqual(str(res), "lc=1,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_ls_dbs(self):
res = self.db.ls_dbs()
self.assertTrue(len(res.rows) > 0)
self.assertTrue(self.db_name in res.rows)
self.assertTrue(res)
# end def
def test_ls_cols(self):
res = self.db.ls_cols(self.table_name)
self.assertEqual(str(res), "lc=1,lr=4,rc=4,le=0")
self.assertTrue("col1" in res.rows)
self.assertTrue("col2" in res.rows)
self.assertTrue("col3" in res.rows)
self.assertTrue(res)
# end def
def test_ls_cols_e1(self):
res = self.db.ls_cols("invalid")
self.assertEqual(str(res), "lc=1,lr=0,rc=0,le=0")
self.assertFalse("col1" in res.rows)
self.assertFalse("col2" in res.rows)
self.assertFalse("col3" in res.rows)
self.assertTrue(res)
# end def
def test_ls_rows(self):
res = self.db.ls_rows(self.table_name)
self.assertEqual(len(res.rows), 0)
self.assertEqual(str(res), "lc=4,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_ls_tables(self):
res = self.db.ls_tables()
self.assertEqual(len(res.rows), 1)
self.assertEqual(str(res.singleton()), self.table_name)
self.assertTrue(res)
# end def
def test_mk_db(self):
res = self.db.mk_db("test2")
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
res = self.db.use_db("test2")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), "test2")
self.assertTrue(res)
res = self.db.rm_db("test2")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(res.singleton(), None)
self.assertTrue(res)
# end def
def test_mk_tables(self):
n = "test_asdf"
res = self.db.mk_table(n, [])
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.ls_cols(n)
self.assertEqual(len(res.rows), 1) # primary key
self.assertTrue(res)
res = self.db.rm_table(n)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
# end def
def test_new_db_e1(self):
res = self.db.new_db("foo", "invalid_unrealistic_layout")
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
# end def
def test_new_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.new_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.has_row(self.table_name, "foo")
self.assertTrue(res)
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertEqual(str(res), "lc=4,lr=1,rc=1,le=0")
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
# end def
def test_new_row_e1(self):
        # if the key is too long the new command fails silently.
test_vals = {"col1": 1, "col2": 2, "col3": 3}
key = "f" * 100
res = self.db.new_row(self.table_name, key, test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.has_row(self.table_name, key)
self.assertFalse(res)
self.assertFalse(res)
# end def
def test_set_row(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.has_row(self.table_name, "foo")
self.assertTrue(res)
self.assertTrue(res)
res = self.db.del_row(self.table_name, "foo")
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
# end def
def test_set_row_1(self):
test_vals = {"col1": 1, "col2": 2, "col3": 3}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=4,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue("foo" in res.rows[0])
self.assertTrue(3 in res.rows[0])
self.assertTrue(res)
test_vals = {"col1": 1, "col2": 2, "col3": 4}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue(4 in res.rows[0])
self.assertTrue(res)
test_vals = {"col1": 5, "col2": 6}
res = self.db.set_row(self.table_name, "foo", test_vals)
self.assertEqual(str(res), "lc=0,lr=0,rc=1,le=0")
self.assertTrue(res)
res = self.db.ls_rows(self.table_name)
self.assertTrue(4 in res.rows[0])
self.assertTrue(5 in res.rows[0])
self.assertTrue(6 in res.rows[0])
self.assertTrue(res)
# end def
def test_pwd_db(self):
res = self.db.pwd_db()
self.assertEqual(str(res), "lc=1,lr=1,rc=1,le=0")
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
# end def
def test_use_db(self):
res = self.db.use_db(self.db_name)
self.assertEqual(res.errors, [])
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res), "lc=1,lr=1,rc=1,le=0")
self.assertTrue(res)
# end def
def test_use_db_e1(self):
        # Passing an invalid database name to use_db must not change the currently selected database.
res = self.db.use_db(self.db_name)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
res = self.db.use_db(self.table_name)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
# end def
def test_use_db_e2(self):
        # Passing None to use_db must not change the currently selected database.
res = self.db.use_db(self.db_name)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
res = self.db.use_db(None)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), self.db_name)
self.assertTrue(res)
# end def
def test_use_db_e3(self):
        # Check which functions still work when no database is selected.
db_name = "unit_test_e3"
table_name = "unit_test_e3_table"
res = self.db.mk_db(db_name)
self.assertTrue(res)
res = self.db.use_db(db_name)
self.assertTrue(res)
res = self.db.rm_db(db_name)
self.assertTrue(res)
res = self.db.pwd_db()
self.assertEqual(str(res.singleton()), str(None))
self.assertTrue(res)
res = self.db.mk_table(table_name, {})
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.ls_rows(table_name)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.ls_cols(table_name)
self.assertEqual(str(res), "lc=1,lr=0,rc=0,le=0")
self.assertTrue(res)
res = self.db.ls_tables()
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
res = self.db.rm_table(table_name)
self.assertEqual(str(res), "lc=0,lr=0,rc=0,le=1")
self.assertFalse(res)
# end def
def tearDown(self):
# res = self.db.rm_db(self.db_name)
pass
# end def
# end class
if __name__ == '__main__':
unittest.main()
# __END__
| lgpl-3.0 | 5,546,769,965,514,912,000 | 33.765013 | 73 | 0.584454 | false |
joke2k/faker | tests/providers/test_company.py | 1 | 16381 | import re
from datetime import datetime
from unittest.mock import patch
import pytest
from faker.providers.company.en_PH import Provider as EnPhCompanyProvider
from faker.providers.company.fil_PH import Provider as FilPhCompanyProvider
from faker.providers.company.hu_HU import Provider as HuHuCompanyProvider
from faker.providers.company.hy_AM import Provider as HyAmCompanyProvider
from faker.providers.company.it_IT import Provider as ItItCompanyProvider
from faker.providers.company.ja_JP import Provider as JaJpCompanyProvider
from faker.providers.company.nl_NL import Provider as NlNlCompanyProvider
from faker.providers.company.pl_PL import Provider as PlPlCompanyProvider
from faker.providers.company.pl_PL import company_vat_checksum, local_regon_checksum, regon_checksum
from faker.providers.company.pt_BR import company_id_checksum
from faker.providers.company.ro_RO import Provider as RoRoCompanyProvider
from faker.providers.company.ru_RU import Provider as RuRuCompanyProvider
from faker.providers.company.ru_RU import calculate_checksum
from faker.providers.company.th_TH import Provider as ThThCompanyProvider
from faker.providers.company.tr_TR import Provider as TrTrCompanyProvider
class TestFiFi:
"""Test fi_FI company provider methods"""
def _has_valid_checksum(self, company_id):
factors = [7, 9, 10, 5, 8, 4, 2]
checksum = 0
for x, y in zip(company_id[:-2], factors):
checksum += int(x) * y
checksum %= 11
checksum = 11 - checksum if checksum else 0
return int(company_id[-1]) == checksum
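    # Worked example of the weighted checksum above: for the business id "0112038-9"
    # the weighted sum is 0*7 + 1*9 + 1*10 + 2*5 + 0*8 + 3*4 + 8*2 = 57,
    # 57 % 11 = 2, and 11 - 2 = 9, which matches the final digit.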
def test_company_business_id(self, faker, num_samples):
for _ in range(num_samples):
company_id = faker.company_business_id()
assert len(company_id) == 9
assert self._has_valid_checksum(company_id)
class TestHyAm:
"""Test hy_AM company provider methods"""
def test_bs(self, faker, num_samples):
for _ in range(num_samples):
bs = faker.bs()
assert isinstance(bs, str)
def test_catch_phrase(self, faker, num_samples):
for _ in range(num_samples):
catch_phrase = faker.catch_phrase()
assert isinstance(catch_phrase, str)
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in HyAmCompanyProvider.company_suffixes
class TestJaJp:
"""Test ja_JP company provider methods"""
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in JaJpCompanyProvider.company_prefixes
def test_company_category(self, faker, num_samples):
for _ in range(num_samples):
category = faker.company_category()
assert isinstance(category, str)
assert category in JaJpCompanyProvider.company_categories
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
assert any(
company.startswith(prefix) or company.endswith(prefix)
for prefix in JaJpCompanyProvider.company_prefixes
)
assert any(
category in company
for category in JaJpCompanyProvider.company_categories
)
class TestPtBr:
"""Test pt_BR company provider methods"""
def test_company_id_checksum(self):
assert company_id_checksum([9, 4, 9, 5, 3, 4, 4, 1, 0, 0, 0, 1]) == [5, 1]
assert company_id_checksum([1, 6, 0, 0, 4, 6, 3, 9, 0, 0, 0, 1]) == [8, 5]
def test_company_id(self, faker, num_samples):
for _ in range(num_samples):
company_id = faker.company_id()
assert re.fullmatch(r'\d{14}', company_id)
def test_cnpj(self, faker, num_samples):
for _ in range(num_samples):
cnpj = faker.cnpj()
assert re.fullmatch(r'\d{2}\.\d{3}\.\d{3}/0001-\d{2}', cnpj)
class TestHuHu:
"""Test hu_HU company provider methods"""
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in HuHuCompanyProvider.company_suffixes
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
assert company.split(" ")[-1] in HuHuCompanyProvider.company_suffixes
class TestPlPl:
"""Test pl_PL company provider methods"""
def test_regon_checksum(self):
assert regon_checksum([1, 2, 3, 4, 5, 6, 7, 8]) == 5
assert regon_checksum([8, 9, 1, 9, 5, 7, 8, 8]) == 3
assert regon_checksum([2, 1, 7, 1, 5, 4, 8, 3]) == 8
assert regon_checksum([7, 9, 3, 5, 4, 7, 9, 3]) == 9
assert regon_checksum([9, 1, 5, 9, 6, 9, 4, 7]) == 7
def test_regon(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r'\d{9}', faker.regon())
def test_local_regon_checksum(self):
assert local_regon_checksum([1, 2, 3, 4, 5, 6, 7, 8, 5, 1, 2, 3, 4]) == 7
assert local_regon_checksum([6, 1, 1, 9, 4, 8, 8, 3, 2, 7, 5, 8, 0]) == 3
assert local_regon_checksum([8, 9, 2, 0, 0, 3, 6, 6, 0, 7, 0, 3, 2]) == 3
assert local_regon_checksum([3, 5, 7, 7, 1, 0, 2, 2, 2, 5, 4, 3, 3]) == 0
assert local_regon_checksum([9, 3, 5, 3, 1, 1, 0, 1, 2, 4, 8, 8, 2]) == 1
def test_local_regon(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r'\d{14}', faker.local_regon())
def test_company_vat_checksum(self):
assert company_vat_checksum([7, 7, 5, 7, 7, 7, 6, 0, 5]) == 9
assert company_vat_checksum([1, 8, 6, 5, 4, 9, 9, 6, 4]) == 2
assert company_vat_checksum([7, 1, 2, 8, 9, 2, 4, 9, 9]) == 7
assert company_vat_checksum([3, 5, 4, 6, 1, 0, 6, 5, 8]) == 4
assert company_vat_checksum([3, 1, 9, 5, 5, 7, 0, 4, 5]) == 0
def test_company_vat(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r'\d{10}', faker.company_vat())
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in PlPlCompanyProvider.company_prefixes
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in PlPlCompanyProvider.company_suffixes
class TestNlNl:
"""Test nl_NL company provider methods"""
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in NlNlCompanyProvider.company_prefixes
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in NlNlCompanyProvider.company_suffixes
def test_large_companies(self, faker, num_samples):
for _ in range(num_samples):
company = faker.large_company()
assert isinstance(company, str)
assert company in NlNlCompanyProvider.large_companies
class TestEnPh:
"""Test en_PH company provider methods"""
@classmethod
def setup_class(cls):
cls.company_types = EnPhCompanyProvider.company_types
cls.company_suffixes = EnPhCompanyProvider.company_suffixes.keys()
cls.company_products = EnPhCompanyProvider.company_products
cls.national_corporation_pattern = re.compile(r'^National (.*?) Corporation of the Philippines$')
def test_random_company_noun_chain(self, faker, num_samples):
for _ in range(num_samples):
noun_list = faker.random_company_noun_chain().split()
assert 1 <= len(noun_list) <= 2
def test_random_company_acronym(self, faker, num_samples):
for _ in range(num_samples):
acronym = faker.random_company_acronym()
assert 2 <= len(acronym) <= 4
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
if company.split()[-1] in self.company_suffixes and company.split()[-2] in self.company_types:
continue
else:
national_corporation_match = self.national_corporation_pattern.fullmatch(company)
assert national_corporation_match and national_corporation_match.group(1) in self.company_products
class TestFilPh(TestEnPh):
"""Test fil_PH company provider methods"""
def test_PH_random_good_service_adjective_chain(self, faker, num_samples):
for _ in range(num_samples):
adjectives = faker.random_good_service_adjective_chain().split(' at ')
assert all(
adjective in FilPhCompanyProvider.good_service_adjectives
for adjective in adjectives
)
class TestTlPh(TestFilPh):
"""Test tl_PH company provider methods"""
pass
class TestRuRu:
"""Test ru_RU company provider methods"""
def test_calculate_checksum_nine_digits(self):
assert calculate_checksum('164027304') == '7'
assert calculate_checksum('629082979') == '0'
assert calculate_checksum('0203184580') == '5'
assert calculate_checksum('1113145630') == '0'
assert calculate_checksum('70517081385') == '1'
assert calculate_checksum('60307390550') == '0'
def test_businesses_inn(self, faker, num_samples):
for _ in range(num_samples):
inn = faker.businesses_inn()
assert len(inn) == 10
assert calculate_checksum(inn[:9]) == inn[9]
def test_individuals_inn(self, faker, num_samples):
for _ in range(num_samples):
inn = faker.individuals_inn()
assert len(inn) == 12
assert calculate_checksum(inn[:10]) == inn[10]
assert calculate_checksum(inn[:11]) == inn[11]
def test_businesses_ogrn(self, faker, num_samples):
max_year = datetime.now().year - 2000
for _ in range(num_samples):
ogrn = faker.businesses_ogrn()
assert len(ogrn) == 13
assert ogrn[0] in ('1', '5')
assert 1 <= int(ogrn[1:3]) <= max_year
assert 1 <= int(ogrn[3:5]) <= 92
assert int(ogrn[:-1]) % 11 % 10 == int(ogrn[-1])
def test_individuals_ogrn(self, faker, num_samples):
max_year = datetime.now().year - 2000
for _ in range(num_samples):
ogrn = faker.individuals_ogrn()
assert len(ogrn) == 15
assert ogrn[0] == '3'
assert 1 <= int(ogrn[1:3]) <= max_year
assert 1 <= int(ogrn[3:5]) <= 92
assert int(ogrn[:-1]) % 13 % 10 == int(ogrn[-1])
def test_kpp(self, faker, num_samples):
for _ in range(num_samples):
kpp = faker.kpp()
assert len(kpp) == 9
assert 1 <= int(kpp[0:2]) <= 92
assert int(kpp[2:4]) > 0
assert kpp[4:6] in ('01', '43', '44', '45')
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in RuRuCompanyProvider.company_prefixes
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in RuRuCompanyProvider.company_suffixes
def test_large_companies(self, faker, num_samples):
for _ in range(num_samples):
company = faker.large_company()
assert isinstance(company, str)
assert company in RuRuCompanyProvider.large_companies
def test_catchphrase(self, faker, num_samples):
for _ in range(num_samples):
catchphrase = faker.catch_phrase()
assert isinstance(catchphrase, str)
assert ' и ' in catchphrase
def test_bs(self, faker, num_samples):
for _ in range(num_samples):
bs = faker.bs()
bs_words = bs.split()
assert isinstance(bs, str)
assert bs_words[0] in RuRuCompanyProvider.bsWords[0]
class TestItIt:
"""Test it_IT company provider methods"""
vat_regex = re.compile(r"^IT\d{7}(0\d{2}|100|120|121|888|999)\d$", flags=re.ASCII)
def test_company_vat(self, faker, num_samples):
for _ in range(num_samples):
company_vat = faker.company_vat()
assert self.vat_regex.match(company_vat)
@pytest.mark.parametrize("value, expected", (
(100, "100"),
(101, "120"),
(102, "121"),
(103, "888"),
(104, "999"),
))
def test_company_vat_special_cases(self, faker, value, expected):
        # this test allows full code coverage for company_vat by fixing the internal state of the random generator
fake = ItItCompanyProvider(generator=faker)
with patch.object(fake, "random_int", return_value=value, autospec=True):
company_vat = fake.company_vat()
assert self.vat_regex.match(company_vat)
assert company_vat[9:12] == expected
class TestThTh:
"""Test th_TH company provider methods"""
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in ThThCompanyProvider.company_prefixes
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in ThThCompanyProvider.company_suffixes
def test_company_limited_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_limited_prefix()
assert isinstance(prefix, str)
assert prefix in ThThCompanyProvider.company_limited_prefixes
def test_company_limited_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_limited_suffix()
assert isinstance(suffix, str)
assert suffix in ThThCompanyProvider.company_limited_suffixes
def test_nonprofit_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.nonprofit_prefix()
assert isinstance(prefix, str)
assert prefix in ThThCompanyProvider.nonprofit_prefixes
def test_company(self, faker, num_samples):
for _ in range(num_samples):
company = faker.company()
assert isinstance(company, str)
class TestTrTr:
"""Test tr_TR company provider methods"""
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in TrTrCompanyProvider.company_suffixes
def test_large_companies(self, faker, num_samples):
for _ in range(num_samples):
company = faker.large_company()
assert isinstance(company, str)
assert company in TrTrCompanyProvider.large_companies
class TestRoRo:
"""Test ro_RO company provider methods"""
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in RoRoCompanyProvider.company_suffixes
| mit | -2,637,556,588,902,302,700 | 37.723404 | 118 | 0.615018 | false |
RedhawkSDR/FrontEndController | python/FrontEndController.py | 1 | 11241 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK FrontEndController.
#
# REDHAWK FrontEndController is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK FrontEndController is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# AUTO-GENERATED
#
# Source: FrontEndController.spd.xml
from ossie.resource import start_component
import logging
from FrontEndController_base import *
from ossie.utils import redhawk
import frontend
from redhawk.frontendInterfaces import FRONTEND
class FrontEndController_i(FrontEndController_base):
"""
    This component was developed to support the REDHAWK LiveDVD. It also serves to provide a simple
example of FEI Allocations and connections. It was developed with REDHAWK 1.10.0.
"""
targetComponent=None
targetDevice=None
domain = None
targetComponentPort = None
targetDevicePort = None
feiTunerPort = None
connected = False
def initialize(self):
FrontEndController_base.initialize(self)
self.addPropertyChangeListener("TuneRequest", self.TuneRequest_changed)
def start(self):
FrontEndController_base.start(self)
if not self.connected:
self.connectAndTune()
def stop(self):
self._log.debug("Stop called.")
try:
if self.connected:
self.targetDevice.deallocateCapacity(self.allocationRequest)
self.targetDevicePort.disconnectPort(self.allocationId)
except:
self._log.error("Exception occurred while deallocating and disconnecting.")
finally:
self.connected = False
FrontEndController_base.stop(self)
def TuneRequest_changed(self, propid, oldval, newval):
self._log.debug("Received Tune Request Change")
self._log.debug("Currently Connected: " + str(self.connected))
if self.connected:
try:
if (oldval.frequency != newval.frequency):
self._log.debug("Trying to set frequency to: " + str(newval.frequency))
self.feiTunerPort.setTunerCenterFrequency(self.allocationId, newval.frequency*1e6)
if (oldval.sampleRate != newval.sampleRate):
self._log.debug("Trying to set sample rate to: " + str(newval.sampleRate))
self.feiTunerPort.setTunerOutputSampleRate(self.allocationId, newval.sampleRate*1e6)
except FRONTEND.BadParameterException as ex:
self._log.error("Bad Parameter Exception Thrown: " + str(ex))
except FRONTEND.NotSupportedException as ex:
self._log.error("Not Supported Exception Thrown: " + str(ex))
except FRONTEND.FrontendException as ex:
self._log.error("Front End Exception Thrown: " + str(ex))
except Exception as ex:
self._log.error("Failed to set property: " + str(ex))
finally:
self.TuneRequest.frequency = self.feiTunerPort.getTunerCenterFrequency(self.allocationId) / 1e6
self._log.debug("Actual frequency: " + str(self.TuneRequest.frequency))
self.TuneRequest.sampleRate = self.feiTunerPort.getTunerOutputSampleRate(self.allocationId) / 1e6
self._log.debug("Actual sample rate: " + str(self.TuneRequest.sampleRate))
def connectAndTune(self):
# Lets make sure we have everything we need before continuing.
if not self.InputComponent.componentName:
self._log.error("Stopping. Component name must be specified.")
self.stop()
return
if not self.InputComponent.inputPortName:
self._log.error("Stopping. Component input port name must be specified.")
self.stop()
return
if not self.FEIDevice.deviceName:
self._log.error("Stopping. Device name must be specified.")
self.stop()
return
if not self.FEIDevice.outputPortName:
self._log.error("Stopping. Device output port name must be specified.")
self.stop()
return
if not self.FEIDevice.tunerPortName:
self._log.error("Stopping. Device tuner port name must be specified.")
self.stop()
return
# While the domain port does give us a direct connection to the domain, the
# API exposed is cleaner from the domain instance returned via the redhawk.attach method.
try:
domainname = self.port_DomainManager_out._get_name()
self.domain = redhawk.attach(domainname)
except Exception as ex:
self._log.error("Failed to connect to domain: " + str(ex))
self.stop()
return
if self.domain is None:
self._log.error("Stopping. Could not connect to domain.")
self.stop()
return
self._log.debug("Searching for the current waveform in the domain")
waveform = self.findWaveformByComponentInstanceName(self._name)
if waveform is None:
self._log.error("Stopping. Could not find the running waveform.")
self.stop();
return
self._log.debug("Searching for the component in the waveform: " + str(waveform.name))
        # Gets the component from the application. The component name can be the name or the instantiation name, e.g. DataConverter or DataConverter_3
# This allows you to use the same component multiple times in a waveform and know for certain which one you are connecting to.
for comp in waveform.comps:
if self.InputComponent.componentName in comp._instanceName:
self.targetComponent = comp
break
if self.targetComponent is None:
self._log.error("Stopping. Could not find the component: " + self.InputComponent.componentName)
self.stop();
return
self._log.debug("Searching device managers for device: " + self.FEIDevice.deviceName)
self.targetDevice = self.findByDeviceName(self.FEIDevice.deviceName)
if self.targetDevice is None:
self._log.error("Stopping. Could not find the device: " + self.FEIDevice.deviceName)
self.stop()
return
# Gets the references to the input and output ports
self.targetComponentPort = self.targetComponent.getPort(self.InputComponent.inputPortName)
self.targetDevicePort = self.targetDevice.getPort(self.FEIDevice.outputPortName)
self.feiTunerPort = self.targetDevice.getPort(self.FEIDevice.tunerPortName)
if self.targetComponentPort is None:
self._log.error("Stopping. Could not find the component input port: " + self.InputComponent.inputPortName)
self.stop()
return
if self.targetDevicePort is None:
self._log.error("Stopping. Could not find the component output port: " + self.FEIDevice.outputPortName)
self.stop()
return
if self.feiTunerPort is None:
self._log.error("Stopping. Could not find the tuner port: " + self.FEIDevice.tunerPortName)
self.stop()
return
self.allocationRequest = frontend.createTunerAllocation(
tuner_type = self.tunerType,
allocation_id = self.allocationId,
center_frequency = self.TuneRequest.frequency * 1e6,
sample_rate = self.TuneRequest.sampleRate * 1e6,
sample_rate_tolerance = 20.0
)
self._log.debug("Performing allocation of FEI Device")
self._log.debug("Allocation contains: " + str(self.allocationRequest))
retVal = False
try:
retVal = self.targetDevice.allocateCapacity(self.allocationRequest)
except CF.Device.InvalidCapacity as ex:
self._log.error("Device has invalid capacity, allocation failed: " + str(ex))
except CF.Device.InvalidState as ex:
self._log.error("Device in invalid state, allocation failed: " + str(ex))
except Exception as ex:
self._log.error("Exception thrown while allocating: " + str(ex))
if (retVal is False):
self._log.error("Allocation failed. Stopping.")
self.stop()
return
self._log.debug("Allocation succeeded!")
# Makes the actual connection
self._log.debug("Connecting component and device ports")
self.targetDevicePort.connectPort(self.targetComponentPort, self.allocationId)
self.connected = True
self._log.debug("Starting device and component")
# Make sure device and component are started
self.targetDevice.start()
self.targetComponent.start()
# This component does no processing so we can just return FINISH so
# the process method does not get called again.
def process(self):
return FINISH
def findWaveformByComponentInstanceName(self, name):
# Gets a reference to the running application
for app in self.domain.apps:
# Find desired application
for comp in app.comps:
self._log.trace("Checking if " + name + " is in " + comp._instanceName)
if name in comp._instanceName:
return app
return None
def findByDeviceName(self, dev_name):
for devMgr in self.domain.devMgrs:
for dev in devMgr.devs:
self._log.trace("Checking if " + dev_name + " is in " + dev._instanceName)
if dev_name in dev._instanceName:
return dev
return None
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
logging.debug("Starting Component")
start_component(FrontEndController_i)
| lgpl-3.0 | 5,431,343,900,330,088,000 | 40.025547 | 141 | 0.601993 | false |
noracami/ballin-lana | djangoconnectiondashboard/djangoconnectiondashboard/settings/base.py | 1 | 2970 | """
Django settings for djangoconnectiondashboard project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse_lazy
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)
)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# # SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = ''
#
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'teams',
'pages',
'base',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'djangoconnectiondashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoconnectiondashboard.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'ROC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
LOGIN_REDIRECT_URL = reverse_lazy('home')
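# A minimal example of how get_env_var (defined below) is typically used to keep
# secrets out of the settings module; the environment variable name is an assumption:
# SECRET_KEY = get_env_var('DJANGO_SECRET_KEY')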
def get_env_var(key):
try:
return os.environ[key]
except KeyError:
raise ImproperlyConfigured(
'Environment variable {key} required.'.format(key=key)
)
| cc0-1.0 | 4,128,789,508,909,835,000 | 25.517857 | 72 | 0.696633 | false |
srio/dabam | code/dabam_python_example.py | 1 | 1915 | import dabam
if __name__ == '__main__':
#
# get summary table
#
text = dabam.dabam_summary()
print(text)
out = dabam.dabam_summary_dictionary()
for i in range(len(out)):
print("i=:%d, entry:%d"%(i,(out[i])["entry"]))
print(out[13])
#
# load a given entry (=14)
#
dm = dabam.dabam()
dm.set_input_silent(True)
dm.load(14)
info = dm.info_profiles()
print(info)
#
    # make a bunch of plots
#
dm.set_input_plot("heights slopes psd_h csd_h acf_h histo_h")
dm.plot()
# you can do plots by accessing data, ex: plt.plot(1e3*dm.y,1e6*dm.zHeights)
# dm.metadata # metadata
# dm.rawdata # raw datafile
# dm.y # abscissa along the mirror
# dm.zSlopesUndetrended # undetrended slope profile
# dm.zSlopes # detrended slope profile
# dm.zHeightsUndetrended # undetrended heights profile
# dm.zHeights # detrended heights profile
# dm.coeffs # information on detrending (polynomial coeffs)
# dm.f # frequency of Power Spectral Density
# dm.psdHeights # Power Spectral Density of Heights profile
# dm.psdSlopes # Power Spectral Density of slopes profile
    # dm.csdHeights # Antiderivative of PSD of Heights profile
    # dm.csdSlopes # Antiderivative of PSD of Slopes profile
# dm.histoSlopes # to store slopes histogram
# dm.histoHeights # to store heights histogram
# dm.momentsSlopes # to store moments of the slopes profile
# dm.momentsHeights # to store moments of the heights profile
# dm.powerlaw {"hgt_pendent":None, "hgt_shift":None, "slp_pendent":None, "slp_shift":None,
| gpl-2.0 | 4,966,671,420,835,856,000 | 32.596491 | 111 | 0.556136 | false |
toomore/grs | grs/twseno.py | 1 | 6281 | # -*- coding: utf-8 -*-
''' TWSE stock no. '''
# Copyright (c) 2012, 2013, 2014 Toomore Chiang, http://toomore.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import csv
import os
import re
class ImportCSV(object):
""" Import CSV
:param path stock_no_files: 個股檔案列表
:param path industry_code_files: 個股分類表
"""
def __init__(self, stock_no_files, industry_code_files):
self.industry_code_files = industry_code_files
self.last_update = ''
self.stock_no_files = stock_no_files
self.__allstockno = self.importcsv()
def importcsv(self):
''' import data from csv '''
csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
for i in csv_data:
try:
result[i[0]] = str(i[1]).decode('utf-8')
except ValueError:
if i[0] == 'UPDATE':
self.last_update = str(i[1]).decode('utf-8')
else:
pass
return result
def __industry_code(self):
''' import industry_code '''
csv_path = os.path.join(os.path.dirname(__file__),
self.industry_code_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
for i in csv_data:
result[i[0]] = i[1].decode('utf-8')
return result
def __loadindcomps(self):
''' import industry comps '''
csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
check_words = re.compile(r'^[\d]{2,}[\w]?')
for i in csv_data:
if check_words.match(i[2]):
try:
result[i[2]].append(i[0].decode('utf-8'))
except (ValueError, KeyError):
try:
result[i[2]] = [i[0].decode('utf-8')]
except KeyError:
pass
return result
def search(self, name):
""" 搜尋股票名稱 by unicode
:param str name: 欲搜尋的字串
:rtype: dict
"""
pattern = re.compile(name)
result = {}
for i in self.__allstockno:
query = re.search(pattern, self.__allstockno[i])
if query:
query.group()
result[i] = self.__allstockno[i]
return result
def searchbyno(self, no):
""" 搜尋股票代碼
:param str no: 欲搜尋的字串
:rtype: dict
"""
pattern = re.compile(str(no))
result = {}
for i in self.__allstockno:
query = re.search(pattern, str(i))
if query:
query.group()
result[i] = self.__allstockno[i]
return result
@property
def all_stock(self):
""" 回傳股票代碼與名稱
:rtype: dict
"""
return self.__allstockno
@property
def all_stock_no(self):
""" 回傳股票代碼
:rtype: list
"""
return self.__allstockno.keys()
@property
def all_stock_name(self):
""" 回傳股票名稱
:rtype: list
"""
return self.__allstockno.values()
@property
def industry_code(self):
""" 回傳類別代碼
:rtype: dict
"""
return self.__industry_code()
@property
def industry_comps(self):
""" 回傳分類的股票
:rtype: dict
"""
return self.__loadindcomps()
def get_stock_comps_list(self):
""" 回傳日常交易的類別代碼與名稱
:rtype: dict
.. versionadded:: 0.5.6
"""
code_list = self.industry_code
stock_comps_list = {}
for i in code_list:
if len(i) == 2 and i.isdigit():
stock_comps_list.update({i: code_list[i]})
return stock_comps_list
def get_stock_list(self):
""" 回傳日常交易的代碼與名稱
:rtype: dict
.. versionadded:: 0.5.6
"""
all_stock = self.all_stock
industry_comps = self.industry_comps
result = {}
for comps_no in self.get_stock_comps_list():
if comps_no in industry_comps:
for stock_no in industry_comps[comps_no]:
result.update({stock_no: all_stock[stock_no]})
return result
class TWSENo(ImportCSV):
""" 上市股票代碼與搜尋 """
def __init__(self):
super(TWSENo, self).__init__('twse_list.csv', 'industry_code.csv')
class OTCNo(ImportCSV):
""" 上櫃股票(OTC, Over-the-counter) 代碼與搜尋"""
def __init__(self):
super(OTCNo, self).__init__('otc_list.csv', 'industry_code_otc.csv')
if __name__ == '__main__':
t = TWSENo()
#t = OTCNo()
t_list = t.get_stock_list()
print t_list
| mit | -4,252,215,680,980,006,000 | 28.827586 | 79 | 0.536581 | false |
aliok/trnltk | trnltk/morphology/learner/requesthandler/parseresultdetailhandler.py | 1 | 1499 | """
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from trnltk.morphology.learner.controller.parseresultdetailcontroller import ParseResultDetailController
from trnltk.morphology.learner.controller.sessionmanager import SessionManager
from trnltk.morphology.learner.requesthandler.sessionawarerequesthandler import SessionAwareRequestHandler
class ParseResultDetailHandler(SessionAwareRequestHandler):
def get(self):
param_parse_result_uuid = self.request.get('parseResultUUID')
if not param_parse_result_uuid:
raise Exception(u"Missing parameter : parseResultUUID")
sessionmanager = SessionManager(self.session)
controller = ParseResultDetailController(sessionmanager)
calculation_context = controller.get_calculation_context(param_parse_result_uuid)
self.render_response("parseresultdetailtemplate.jinja2", **{'calculation_context' : calculation_context, 'parse_result_uuid' : param_parse_result_uuid}) | apache-2.0 | 2,465,502,588,304,247,000 | 47.387097 | 160 | 0.78986 | false |
iandees/all-the-places | locations/spiders/pizzaranch.py | 1 | 3924 | import scrapy
import re
from locations.items import GeojsonPointItem
import json
class CVSSpider(scrapy.Spider):
name = "pizzaranch"
allowed_domains = ["pizzaranch.com"]
download_delay = 0.5
start_urls = (
'https://pizzaranch.com/locations',
)
def parse_times(self, times):
if times.strip() == 'Open 24 hours':
return '24/7'
hours_to = [x.strip() for x in times.split('-')]
cleaned_times = []
for hour in hours_to:
if re.search('PM$', hour):
hour = re.sub('PM', '', hour).strip()
hour_min = hour.split(":")
if int(hour_min[0]) < 12:
hour_min[0] = str(12 + int(hour_min[0]))
cleaned_times.append(":".join(hour_min))
if re.search('AM$', hour):
hour = re.sub('AM', '', hour).strip()
hour_min = hour.split(":")
if len(hour_min[0]) <2:
hour_min[0] = hour_min[0].zfill(2)
else:
hour_min[0] = str(int(hour_min[0]))
cleaned_times.append(":".join(hour_min))
return "-".join(cleaned_times)
def parse_hours(self, lis):
hours = []
for li in lis:
day = li.xpath('normalize-space(.//td[@class="c-location-hours-details-row-day"]/text())').extract_first()[:2]
times = li.xpath('.//td[@class="c-location-hours-details-row-intervals"]/span/span/text()').extract()
times = "".join(x for x in times)
if times and day:
parsed_time = self.parse_times(times)
hours.append(day + ' ' + parsed_time)
return "; ".join(hours)
def parse_stores(self, response):
map_data = response.xpath('normalize-space(//script[@id="js-map-config-dir-map-nap-map"]/text())').extract_first()
map_json = json.loads(map_data)
properties = {
'name': response.xpath('//span[@class="location-name-geo"]/text()').extract_first(),
'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/span/text())').extract_first(),
'phone': response.xpath('normalize-space(//div[@class="c-phone-number c-phone-main-number"]/span[@class="c-phone-number-span c-phone-main-number-span"]/text())').extract_first(),
'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(),
'state': response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(),
'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
'ref': map_json['locs'][0]['id'],
'website': response.url,
'lat': float( map_json['locs'][0]['latitude']),
'lon': float( map_json['locs'][0]['longitude']),
}
hours = self.parse_hours(response.xpath('//div[@class="hours-primary hidden-xs"]/div[@class="c-location-hours"]/div[@class="c-location-hours-details-wrapper js-location-hours"]/table/tbody/tr'))
if hours:
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
def parse_state_stores(self, response):
stores = response.xpath('//h3[@class="title"]/a/@href').extract()
for store in stores:
yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)
next_page_url = response.xpath('//div[@class="pagination"]//li[@class="next"]/a/@href').extract_first()
if next_page_url:
yield scrapy.Request(next_page_url, callback=self.parse_state_stores)
def parse(self, response):
urls = response.xpath('//ol[@class="state-list"]/li/a/@href').extract()
for path in urls:
yield scrapy.Request(response.urljoin(path), callback=self.parse_state_stores)
| mit | -6,156,751,004,344,614,000 | 44.627907 | 202 | 0.568552 | false |
thegricean/sinking-marbles | models/wonky_world/scripts/parseResults.py | 1 | 4158 | import csv
import itertools
import random
import ast
import sys
#usage
# python parseResults.py results.txt
fname = '../results/model_results/'+sys.argv[1]
file_names = [fname]
itemfile = open("items.txt")
items = [" ".join(l.rstrip().split()) for l in itemfile.readlines()]
itemfile.close()
print items
lines = []
results = []
wresults = []
files = [open(fn) for fn in file_names]
for f in files:
lines.extend([l.rstrip() for l in f.readlines()])
#print lines
def getReducedAlternatives(alts):
basic = ""
lownum = ""
highnum = ""
extra = ""
twowords = ""
threewords = ""
if "some,all,none" in alts:
basic = "0_basic"
if "one,two,three" in alts:
lownum = "1_lownum"
if "eleven" in alts:
highnum = "3_highnum"
if "many" in alts:
extra = "2_extra"
if "almostall" in alts:
twowords = "4_twowords"
if "lessthanhalf" in alts:
threewords = "5_threewords"
return "".join([basic,lownum,extra,highnum,twowords,threewords])
headers = ["Item","QUD","NumState","Alternatives","SpeakerOptimality","PriorProbability-0","PriorProbability-1","PriorProbability-2","PriorProbability-3","PriorProbability-4","PriorProbability-5","PriorProbability-6","PriorProbability-7","PriorProbability-8","PriorProbability-9","PriorProbability-10","PriorProbability-11","PriorProbability-12","PriorProbability-13","PriorProbability-14","PriorProbability-15","PosteriorProbability","SmoothingBW"]
k = 0
mcnt = 0
condcnt = 0
priorcnt = 0
while k < len(lines):
if lines[k] == "alternatives":
if priorcnt < 89:
priorcnt = priorcnt+1
else:
priorcnt = 0
mcnt = mcnt + 1
k = k + 1
alts = getReducedAlternatives(lines[k])
k = k + 1
smoothing_bw = lines[k].split("_")[1]
k = k + 1
priors = lines[k].split(",")
k = k + 1
qud = lines[k].split(",")[1]
k = k + 1
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
print pairs
print k
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
# print priorcnt
# print len(items)
results.append([items[priorcnt],qud, ssize[j], alts, spopt, priors[0], priors[1], priors[2], priors[3], priors[4], priors[5], priors[6], priors[7], priors[8], priors[9], priors[10], priors[11], priors[12], priors[13], priors[14], priors[15], prob[j],smoothing_bw])
k = k + 1
elif lines[k].startswith("speaker-opt"):
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
print pairs
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
results.append([items[priorcnt],qud, ssize[j], alts, spopt, priors[0], priors[1], priors[2], priors[3], priors[4], priors[5], priors[6], priors[7], priors[8], priors[9], priors[10], priors[11], priors[12], priors[13], priors[14], priors[15], prob[j],smoothing_bw])
k = k + 1
elif lines[k].startswith("qud"):
qud = lines[k].split(",")[1]
k = k + 1
spopt = lines[k].split(",")[1]
k = k + 1
pairs = lines[k].split(",,")
print pairs
ssize = pairs[0].split(",")
prob = pairs[1].split(",")
for j in range(len(ssize)):
results.append([items[priorcnt],qud, ssize[j], alts, spopt, priors[0], priors[1], priors[2], priors[3], priors[4], priors[5], priors[6], priors[7], priors[8], priors[9], priors[10], priors[11], priors[12], priors[13], priors[14], priors[15], prob[j],smoothing_bw])
k = k + 1
else:
#print lines[k]
print "this shouldn't be happening"
#print results
for r in results:
inner_dict = dict(zip(headers,r))
wresults.append(inner_dict)
oname = '../results/data/parsed_marble_results.tsv'
#w = csv.DictWriter(open('../results-simulation/parsed/pragmatic-speaker-uniform.csv', 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
#w = csv.DictWriter(open('../results-simulation/parsed/pragmatic-speaker-simple.csv', 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
#w = csv.DictWriter(open('../results-simulation/parsed/pragmatic-speaker-partitive.csv', 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
w = csv.DictWriter(open(oname, 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
w.writeheader()
w.writerows(wresults)
| mit | -4,322,104,831,348,527,600 | 32.264 | 457 | 0.64911 | false |
mdrasmus/argweaver | argweaver/emit.py | 1 | 4373 | #
# HMM emission related functions
#
from math import exp, log
#=============================================================================
def parsimony_ancestral_seq(tree, seqs, pos):
"""Calculates ancestral sequence for a local tree using parsimony"""
ancestral = {}
sets = {}
    # do unweighted parsimony
for node in tree.postorder():
if node.is_leaf():
sets[node] = set([seqs[node.name][pos]])
else:
lset = sets[node.children[0]]
rset = sets[node.children[1]]
intersect = lset & rset
if len(intersect) > 0:
sets[node] = intersect
else:
sets[node] = lset | rset
# traceback
for node in tree.preorder():
s = sets[node]
if len(s) == 1 or not node.parents:
# NOTE: this technique is used to make assignment deterministic
ancestral[node.name] = ("A" if "A" in s else
"C" if "C" in s else
"G" if "G" in s else
"T")
else:
pchar = ancestral[node.parents[0].name]
if pchar in s:
ancestral[node.name] = pchar
else:
ancestral[node.name] = ("A" if "A" in s else
"C" if "C" in s else
"G" if "G" in s else
"T")
return ancestral
def calc_emission(tree, model, pos, new_name):
"""
Calculates emissions for all states at positions 'pos'
"""
mu = model.mu
seqs = model.seqs
mintime = model.time_steps[0]
emit = []
for node_name, timei in model.states[pos]:
node = tree[node_name]
time = model.times[timei]
local_site = parsimony_ancestral_seq(tree, seqs, pos)
# v = new chromosome
# x = current branch
# p = parent of current branch
if node.parents:
parent = node.parents[0]
parent_age = parent.age
if not parent.parents:
# unwrap top branch
c = parent.children
sib = (c[1] if node == c[0] else c[0])
v = seqs[new_name][pos]
x = local_site[node.name]
p = local_site[sib.name]
# modify (x,p) length to (x,p) + (sib,p)
parent_age = 2 * parent_age - sib.age
else:
v = seqs[new_name][pos]
x = local_site[node.name]
p = local_site[parent.name]
else:
parent = None
parent_age = None
# adjust time by unwrapping branch
time = 2 * time - node.age
v = seqs[new_name][pos]
x = local_site[node.name]
p = x
time = max(time, mintime)
if v == x == p:
# no mutation
emit.append(- mu * time)
elif v != p == x:
# mutation on v
emit.append(log(.3333 - .3333 * exp(-mu * time)))
elif v == p != x:
# mutation on x
t1 = max(parent_age - node.age, mintime)
t2 = max(time - node.age, mintime)
emit.append(log((1 - exp(-mu * t2)) / (1 - exp(-mu * t1))
* exp(-mu * (time + t2 - t1))))
elif v == x != p:
# mutation on (y,p)
t1 = max(parent_age - node.age, mintime)
t2 = max(parent_age - time, mintime)
emit.append(log((1 - exp(-mu * t2)) / (1 - exp(-mu * t1))
* exp(-mu * (time + t2 - t1))))
else:
# two mutations (v,x)
# mutation on x
if parent:
t1 = max(parent_age - node.age, mintime)
t2a = max(parent_age - time, mintime)
else:
t1 = max(model.times[-1] - node.age, mintime)
t2a = max(model.times[-1] - time, mintime)
t2b = max(time - node.age, mintime)
t2 = max(t2a, t2b)
t3 = time
emit.append(log((1 - exp(-mu * t2)) * (1 - exp(-mu * t3))
/ (1 - exp(-mu * t1))
* exp(-mu * (time + t2 + t3 - t1))))
return emit
| mit | -7,273,805,090,329,823,000 | 28.952055 | 78 | 0.430597 | false |
ragnraok/MonoReader | monoweb/mono/api/objects.py | 1 | 2368 | """
API object specification and corresponding methods
"""
def fill_list_article_object(article_id, title, site, updated, cover_url, is_fav):
"""
list article object:
{
article_id: article_id,
title: title,
site: site_title,
updated: YYYY-MM-DD,
cover_url: url, may be None
is_fav: is_fav, boolean
}
"""
result = dict(article_id=article_id, title=title, site=site,
updated=updated.strftime("%Y-%m-%d"), cover_url=cover_url,
is_fav=is_fav)
return result
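# Minimal usage sketch (hypothetical values; `updated` is assumed to be a date/datetime
# supplied by the caller, e.g. datetime.date):
#   fill_list_article_object(1, "Hello", "Example Site", datetime.date(2014, 1, 2), None, False)
#   -> {'article_id': 1, 'title': 'Hello', 'site': 'Example Site',
#       'updated': '2014-01-02', 'cover_url': None, 'is_fav': False}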
def fill_article_object(article_id, title, site, updated, content, url, cover_url,
is_fav):
"""
article object:
{
article_id: article_id,
title: title,
site: site_title,
updated: YYYY-MM-DD,
content: html content,
url: origin article url,
cover_url: url, may be null
is_fav: is_fav, boolean
}
"""
return dict(article_id=article_id, title=title, site=site,
updated=updated.strftime("%Y-%m-%d"), content=content, url=url,
cover_url=cover_url, is_fav=is_fav)
def fill_site_object(site_id, title, updated, url, category, is_read_daily, article_count,
is_un_classified):
"""
site object:
{
site_id: site_id,
title: title,
        updated: YYYY-MM-DD,
category: category,
is_fav: boolean,
article_count: article_count,
url: url
is_un_classified: boolean
}
"""
return dict(site_id=site_id, title=title, updated=updated.strftime("%Y-%m-%d"),
category=getattr(category, 'name', None),
is_fav=is_read_daily, article_count=article_count, url=url,
is_un_classified=is_un_classified)
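# Sketch (hypothetical objects): `category` may be None, in which case
# getattr(category, 'name', None) leaves the 'category' field as None:
#   fill_site_object(3, "Feed", datetime.date(2014, 1, 2), "http://example.com/feed",
#                    None, True, 42, False)['category']   # -> None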
def fill_category_object(category_id, name, is_un_classified):
"""
category object:
{
category_id: category_id,
name: category_name,
is_un_classified: boolean
}
"""
return dict(category_id=category_id, name=name, is_un_classified=is_un_classified)
def fill_change_date_object(timestamp):
"""
change date object:
{
timestamp: timestamp
}
"""
return dict(timestamp=timestamp)
| mit | -4,820,680,407,312,486,000 | 28.974684 | 90 | 0.549831 | false |
JarronL/pynrc | dev_utils/20200617_Shared_Package_shared/Wavefront Errors Tests/20200308_test_wavefront_errors.py | 1 | 21971 |
#def write_fits(path,array):
# from astropy.io import fits
# hdul = fits.PrimaryHDU(array)
# hdul.writeto(path,overwrite=True)
# return
def write_fits(path,array):
from astropy.io import fits
opd = '/Users/mygouf/Python/webbpsf/webbpsf-data4/NIRCam/OPD/OPD_RevW_ote_for_NIRCam_requirements.fits.gz'
hdul = fits.open(opd)
hdul[0].header['BUNIT']
hdu2 = fits.PrimaryHDU(array)
hdu2.header['BUNIT'] = 'micron'
fits.writeto(path, np.nan_to_num(hdu2.data*1e6),hdu2.header,overwrite=True)
return
def display_ote_and_psf(inst,ote, opd_vmax=500, psf_vmax=0.1, title="OPD and PSF", **kwargs):
import matplotlib.pyplot as plt
import webbpsf
psf = inst.calc_psf(monochromatic=2e-6,)
plt.figure(figsize=(12,8))
ax1=plt.subplot(121)
ote.display_opd(ax=ax1, vmax=opd_vmax,
colorbar_orientation='horizontal',
title='OPD modified for mirror moves') #, cbpad=0.05)
ax2=plt.subplot(122)
webbpsf.display_psf(psf, ext=1, vmax=psf_vmax, vmin=psf_vmax/1e4,
colorbar_orientation='horizontal',
title="PSF sim, 2 microns") #, cbpad=0.05)
plt.suptitle(title, fontsize=16)
def show_telescope_wfe(instr, ax=None, title=None, ticks=True, **kwargs):
if ax is None:
ax=plt.gca()
osys = instr._getOpticalSystem()
tel_wfe = osys.planes[0]
tel_wfe.display(what='opd', ax=ax,
colorbar_orientation='vertical',
**kwargs)
if title is None:
title=tel_wfe.name+" for\n"+instr.name
ax.set_title(title)
if not ticks:
ax.set_xticks([])
ax.set_yticks([])
def show_inst_wfe(instr, ax=None, **kwargs):
if ax is None:
        ax = plt.gca()
osys = instr._getOpticalSystem()
pupils = [p for p in osys.planes if p.planetype==poppy.poppy_core.PlaneType.pupil]
inst_wfe = pupils[-1]
inst_wfe.display(what='opd', ax=ax,
colorbar_orientation='vertical', **kwargs)
plt.title(inst_wfe.name.replace(',','\n')+ "field point")
def show_tel_inst_wfes(instr):
plt.figure(figsize=(12,4))
ax1 = plt.subplot(121)
show_telescope_wfe(instr, ax=ax1)
ax2 = plt.subplot(122)
show_inst_wfe(instr, ax=ax2)
return
def dist(yc,xc,y1,x1):
""" Returns the Euclidean distance between two points.
"""
return np.sqrt((yc-y1)**2+(xc-x1)**2)
def find_coords(rad, sep, init_angle, fin_angle):
angular_range = fin_angle-init_angle
npoints = (np.deg2rad(angular_range)*rad)/sep #(2*np.pi*rad)/sep
ang_step = angular_range/npoints #360/npoints
x = []
y = []
for i in range(int(npoints)):
newx = rad * np.cos(np.deg2rad(ang_step * i + init_angle))
newy = rad * np.sin(np.deg2rad(ang_step * i + init_angle))
x.append(newx)
y.append(newy)
return np.array(y), np.array(x)
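# Quick sanity example (assumed values): find_coords(rad=10, sep=5, init_angle=0, fin_angle=360)
# yields int((2*pi*10)/5) = 12 points evenly spaced on a circle of radius 10 pixels.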
def contrast_curve(data):
import matplotlib.pyplot as plt
import photutils
data_crop = data
fwhm = 4
wedge=(0,360)
init_angle, fin_angle = wedge
init_rad=fwhm
NP = data_crop.shape[0]
print(NP)
array = data_crop
centery, centerx = np.array([NP/2,NP/2])
separation = 1.1
separation = 0.5
n_annuli = int(np.floor((centery)/separation))
x = centerx
y = centery
total = []
mean = []
noise = []
vector_radd = []
#plt.figure(figsize=(5,5))
#vmin,vmax=np.min(data_crop),np.max(data_crop)
#plt.imshow(data_crop, cmap='CMRmap', origin = 'lower', vmin = vmin , vmax = vmax)
for i in range(n_annuli-1):
y = centery + init_rad + separation*(i)
rad = dist(centery, centerx, y, x)
yy, xx = find_coords(rad, fwhm, init_angle, fin_angle)
yy += centery
xx += centerx
apertures = photutils.CircularAperture((xx, yy), fwhm/2.)
#fluxes = photutils.aperture_photometry(array, apertures,mask = stis_mask)
fluxes = photutils.aperture_photometry(array, apertures)
fluxes = np.array(fluxes['aperture_sum'])
noise_ann = np.std(fluxes)
noise.append(noise_ann)
vector_radd.append(rad)
mean_ann = np.mean(fluxes)
mean.append(mean_ann)
nb_apertures = apertures.positions.shape[0]
total_ann = np.sum(fluxes)/nb_apertures
total.append(total_ann)
#print(total_ann)
if i <= 9:
apertures.plot(color='blue', lw=1.5, alpha=0.5)
#plt.show()
total0 = np.array(total)
mean0 = np.array(mean)
noise0 = np.array(noise)
vector_rad0 = np.array(vector_radd)
total0_stis = np.array(total)
mean0_stis = np.array(mean)
noise0_stis = np.array(noise)
vector_rad0_stis = np.array(vector_radd)
return noise0_stis, vector_rad0_stis
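# Intended use, as done further below (sketch with an assumed PSF image array):
#   noise, radii = contrast_curve(psf_image)
#   contrast = noise / np.max(psf_image)        # 1-sigma contrast vs. radius (pixels)
#   separation_arcsec = radii * pixelscale      # convert radius to arcsec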
def generate_wavefront_errors(nb_of_maps,errors,nb_zernikes,path):
import poppy,webbpsf
import random
import matplotlib.pyplot as plt
# intial wavefront map
nc = webbpsf.NIRCam()
nc, ote = webbpsf.enable_adjustable_ote(nc)
osys = nc._get_aberrations()
# perturbed wavefront map
nc_perturb = webbpsf.NIRCam()
nc_perturb, ote_perturb = webbpsf.enable_adjustable_ote(nc_perturb)
osys_perturb = nc_perturb._get_aberrations()
# final wavefront map
nc_final = webbpsf.NIRCam()
nc_final, ote_final = webbpsf.enable_adjustable_ote(nc_final)
osys_final = nc_final._get_aberrations()
tab_opd_final = []
for n, error in zip(range(nb_of_maps), errors):
print(n, error)
# change aberrations in wavefront map: example with random zernikes
# this map will be our perturbation map and we will add it to the initial map with a certain weight
# creating the perturbation map
#weight = 0.2
weight = error/100
for i in range(nb_zernikes):
#tmp = random.randint(-10,10)
tmp = random.randint(-1,1)
osys_perturb.zernike_coeffs[i] = weight*tmp*osys.zernike_coeffs[i]
osys_final.zernike_coeffs[i] = osys.zernike_coeffs[i] + weight*tmp*osys.zernike_coeffs[i]
# implementing and displaying the wavefront maps
#display_ote_and_psf(nc, ote, title="Initial OPD and PSF")
ote_perturb.reset()
ote_perturb.move_global_zernikes(osys_perturb.zernike_coeffs[0:10])
#display_ote_and_psf(nc_perturb, ote_perturb, title="Perturbed OPD and PSF")
ote_final.reset()
ote_final.move_global_zernikes(osys_final.zernike_coeffs[0:10])
#display_ote_and_psf(nc_final, ote_final, title="Final OPD and PSF")
rms = ote.rms()
rms_perturb = ote_perturb.rms()
rms_final = ote_final.rms()
print(rms,rms_perturb,rms_final)
print('')
#print(osys.zernike_coeffs)
#print('')
#print(osys_perturb.zernike_coeffs)
#print('')
#print(osys_final.zernike_coeffs)
#print('')
opd = poppy.zernike.opd_from_zernikes(osys.zernike_coeffs[0:10],
npix=1024, basis=poppy.zernike.zernike_basis_faster)
opd_perturb = poppy.zernike.opd_from_zernikes(osys_perturb.zernike_coeffs[0:10],
npix=1024, basis=poppy.zernike.zernike_basis_faster)
opd_final = poppy.zernike.opd_from_zernikes(osys_final.zernike_coeffs[0:10],
npix=1024, basis=poppy.zernike.zernike_basis_faster)
#tab_opd_final.append(opd_final)
write_fits(path+'_opd'+str(n)+'.fits',opd)
write_fits(path+'_opd_perturb'+str(n)+'.fits',opd_perturb)
write_fits(path+'_opd_final'+str(n)+'.fits',opd_final)
#plt.figure(figsize=(12,4))
#ax1 = plt.subplot(131)
#ax1.imshow(opd)
#ax1.set_title('initial wavefront map')
#ax2 = plt.subplot(132)
#ax2.imshow(opd_perturb)
#ax2.set_title('perturbed wavefront map')
#ax3 = plt.subplot(133)
#ax3.imshow(opd_final)
#ax3.set_title('sum of maps')
#plt.show()
wavefront_error = mse(np.nan_to_num(opd), np.nan_to_num(opd_final))
print('mse',error,wavefront_error)
print("MSE: %.2f" % (wavefront_error*100))
return tab_opd_final
def mse(image, reference):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
import numpy as np
n_points = float(image.shape[0] * image.shape[1])
err = np.sum((image.astype("float") - reference.astype("float")) ** 2)
err /= n_points
err = np.sqrt(err)
#norm = 1
#norm = np.sum(reference) / n_points
norm = np.sqrt(np.sum((np.abs(reference))**2)/n_points)
print('erreur:',err/norm)
# return the MSE, the lower the error, the more "similar"
# the two images are
return err/norm
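# Behaviour sketch (assumed arrays): identical maps give 0, and the value is the RMS
# difference normalised by the RMS of the reference, e.g.
#   ref = np.ones((4, 4))
#   mse(ref, ref)         # -> 0.0
#   mse(1.1 * ref, ref)   # -> 0.1  (10% normalised error)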
def compare_images(imageA, imageB, title):
# compute the mean squared error and structural similarity
# index for the images
m = mse(imageA, imageB)
#s = ssim(imageA, imageB)
# setup the figure
fig = plt.figure(title)
plt.suptitle("MSE: %.2f" % (m))
# show first image
ax = fig.add_subplot(1, 2, 1)
plt.imshow(imageA, cmap = plt.cm.gray)
plt.axis("off")
# show the second image
ax = fig.add_subplot(1, 2, 2)
plt.imshow(imageB, cmap = plt.cm.gray)
plt.axis("off")
# show the images
plt.show()
return m
def generate_wavefront_errors_correction(nb_of_maps,errors,nb_zernikes):
import poppy, webbpsf
import matplotlib.pyplot as plt
# intial wavefront map
nc = webbpsf.NIRCam()
nc, ote = webbpsf.enable_adjustable_ote(nc)
osys = nc._get_aberrations()
# final wavefront map
nc_final = webbpsf.NIRCam()
nc_final, ote_final = webbpsf.enable_adjustable_ote(nc_final)
osys_final = nc_final._get_aberrations()
tab_wavefront_error = np.zeros(nb_of_maps)
tab_error = np.zeros(nb_of_maps)
for n, error in zip(range(nb_of_maps), errors):
print(n, error)
#print(zip(range(nb_of_maps)))
#print(errors)
# change aberrations in wavefront map: example with random zernikes
# this map will be our perturbation map and we will add it to the initial map with a certain weight
# creating the perturbation map
#weight = 0.2
#weight = error/100
osys_corrected = osys.zernike_coeffs.copy()
for i in range(nb_zernikes):
if i<error+1:
osys_corrected[i] = 0
print(osys.zernike_coeffs)
print(osys_corrected)
opd = poppy.zernike.opd_from_zernikes(osys.zernike_coeffs,
npix=1024, basis=poppy.zernike.zernike_basis_faster)
opd_corrected = poppy.zernike.opd_from_zernikes(osys_corrected,
npix=1024, basis=poppy.zernike.zernike_basis_faster)
wavefront_error = mse(np.nan_to_num(opd), np.nan_to_num(opd_corrected))
print('mse',error,wavefront_error)
#tab_opd_final.append(opd_final)
#write_fits('_opd'+str(n)+'.fits',opd)
#write_fits('_opd_corrected'+str(n)+'.fits',opd_corrected)
plt.figure(figsize=(12,4))
ax1 = plt.subplot(131)
#ax1.imshow(opd,vmin=np.min(opd),vmax=np.max(opd))
ax1.imshow(opd)
ax1.set_title('initial wavefront map')
ax2 = plt.subplot(132)
#ax2.imshow(opd_corrected,vmin=np.min(opd),vmax=np.max(opd))
ax2.imshow(opd_corrected)
ax2.set_title('corrected wavefront map')
ax3 = plt.subplot(133)
ax3.imshow(opd - opd_corrected)
ax3.set_title('sum of maps')
plt.show()
tab_wavefront_error[n] = wavefront_error
tab_error[n] = error
return tab_wavefront_error, tab_error
def test_wavefront_errors():
# Import packages
#################
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from datetime import date
from datetime import datetime
from astropy.io import fits
import random
import photutils
from Simulator import Simulation
#from Estimation import Estimation
os.environ['WEBBPSF_PATH'] = "/Users/mygouf/Python/webbpsf/my_webbpsf-data3"
os.environ['PYSYN_CDBS'] = "/Users/mygouf/git/pynrc/cdbs/"
import poppy
import webbpsf
# Set up directories
####################
today = date.today()
test_date = date = today.strftime("%Y%m%d")
print(test_date)
tests_directory = './Tests/'+test_date+'/'
if not os.path.exists(tests_directory):
os.makedirs(tests_directory)
directory = tests_directory+test_date+'_wavefront_errors/'
directory1 = directory
if not os.path.exists(directory):
os.makedirs(directory)
path = directory+date
# Parameters simulation images
##############################
#transmission = '/Users/mygouf/Python/webbpsf/webbpsf-data4/jwst_pupil_RevW_npix1024.fits.gz'
#opd = '/Users/mygouf/Python/webbpsf/webbpsf-data4/NIRCam/OPD/OPD_RevW_ote_for_NIRCam_requirements.fits.gz'
# poppy paramaters
pixelscale = 0.063
fov_arcsec = 10
#oversample = 4
#wavelength = 4.441e-6
# webbPSF parameters
#filt = 'F444W'
# Generate wavefront errors
###########################
nb_of_maps = 10
errors = [1,2,3,4,5,10,20,30,40,50]
#nb_of_maps = 2
#errors = [1,2]
nb_zernikes = 35
tab_opd_final = generate_wavefront_errors(nb_of_maps,errors,nb_zernikes,path)
# Generating images with those wavefronts
#########################################
dict_simulation_parameters = {'fov_arcsec': fov_arcsec}
simulation = Simulation(dict_simulation_parameters)
tab_images_initial = np.zeros((nb_of_maps,636,636))
tab_images_final = np.zeros((nb_of_maps,636,636))
for i in range(nb_of_maps):
dict_initial = simulation.create_image_from_opd_file(opd=path+'_opd'+str(i)+'.fits', input_noise=None)
dict_final = simulation.create_image_from_opd_file(opd=path+'_opd_final'+str(i)+'.fits', input_noise=None)
image_initial0 = dict_initial['image']
image_final0 = dict_final['image']
tab_images_initial[i] = image_initial0
tab_images_final[i] = image_final0
# Compute contrast curves
#########################
contrast_initial_image1, vector_rad_initial_image1 = contrast_curve(tab_images_initial[0])
contrast_final_image1, vector_rad_final_image1 = contrast_curve(tab_images_initial[0]-tab_images_final[0])
contrast_initial_image2, vector_rad_initial_image2 = contrast_curve(tab_images_initial[1])
contrast_final_image2, vector_rad_final_image2 = contrast_curve(tab_images_initial[1]-tab_images_final[1])
contrast_initial_image3, vector_rad_initial_image3 = contrast_curve(tab_images_initial[2])
contrast_final_image3, vector_rad_final_image3 = contrast_curve(tab_images_initial[2]-tab_images_final[2])
contrast_initial_image4, vector_rad_initial_image4 = contrast_curve(tab_images_initial[3])
contrast_final_image4, vector_rad_final_image4 = contrast_curve(tab_images_initial[3]-tab_images_final[3])
contrast_initial_image5, vector_rad_initial_image5 = contrast_curve(tab_images_initial[4])
contrast_final_image5, vector_rad_final_image5 = contrast_curve(tab_images_initial[4]-tab_images_final[4])
contrast_initial_image6, vector_rad_initial_image6 = contrast_curve(tab_images_initial[5])
contrast_final_image6, vector_rad_final_image6 = contrast_curve(tab_images_initial[5]-tab_images_final[5])
contrast_initial_image7, vector_rad_initial_image7 = contrast_curve(tab_images_initial[6])
contrast_final_image7, vector_rad_final_image7 = contrast_curve(tab_images_initial[6]-tab_images_final[6])
contrast_initial_image8, vector_rad_initial_image8 = contrast_curve(tab_images_initial[7])
contrast_final_image8, vector_rad_final_image8 = contrast_curve(tab_images_initial[7]-tab_images_final[7])
contrast_initial_image9, vector_rad_initial_image9 = contrast_curve(tab_images_initial[8])
contrast_final_image9, vector_rad_final_image9 = contrast_curve(tab_images_initial[8]-tab_images_final[8])
contrast_initial_image10, vector_rad_initial_image10 = contrast_curve(tab_images_initial[9])
contrast_final_image10, vector_rad_final_image10 = contrast_curve(tab_images_initial[9]-tab_images_final[9])
    # Create and save figures
###########################
pxscale = pixelscale
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(111)
plt.plot(vector_rad_initial_image1*pxscale, contrast_initial_image1/np.max(tab_images_initial[0]), label='Raw')
plt.plot(vector_rad_final_image1*pxscale, contrast_final_image1/np.max(tab_images_initial[0]), label='1% error',linestyle='--')
plt.plot(vector_rad_final_image5*pxscale, contrast_final_image5/np.max(tab_images_initial[4]), label='5% error',linestyle='--')
plt.plot(vector_rad_final_image6*pxscale, contrast_final_image6/np.max(tab_images_initial[5]), label='10% error',linestyle='--')
plt.plot(vector_rad_final_image10*pxscale, contrast_final_image10/np.max(tab_images_initial[9]), label='50% error',linestyle='--')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel('Contrast')
plt.grid('on', which='both', alpha=0.2, linestyle='solid')
ax1.set_yscale('log')
ax1.set_xlim(0, 4)
plt.legend()
plt.show()
fname = path+'_contrast_curves.pdf'
fig.savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, pad_inches=0.1,
frameon=None, metadata=None,bbox_inches = 'tight')
print('Saving file:',fname)
    # Create and save figures
###########################
contrast_raw = contrast_initial_image1/np.max(tab_images_initial[0])
contrast1 = contrast_final_image1/np.max(tab_images_initial[0])
contrast5 = contrast_final_image5/np.max(tab_images_initial[4])
contrast6 = contrast_final_image6/np.max(tab_images_initial[5])
contrast10 = contrast_final_image10/np.max(tab_images_initial[9])
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(111)
    plt.plot(vector_rad_initial_image1*pxscale, contrast_raw/contrast_raw, label='Raw')
plt.plot(vector_rad_final_image1*pxscale, contrast_raw/contrast1, label='1% error',linestyle='--')
plt.plot(vector_rad_final_image5*pxscale, contrast_raw/contrast5, label='5% error',linestyle='--')
plt.plot(vector_rad_final_image6*pxscale, contrast_raw/contrast6, label='10% error',linestyle='--')
plt.plot(vector_rad_final_image10*pxscale, contrast_raw/contrast10, label='50% error',linestyle='--')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel('Contrast Gain')
plt.grid('on', which='both', alpha=0.2, linestyle='solid')
ax1.set_xlim(0, 4)
ax1.set_ylim(0, 50)
ax1.hlines([0,10], 0, 10, colors='k', linestyles='solid', label='', data=None)
plt.legend()
plt.show()
fname = path+'_contrast_gain_curves.pdf'
fig.savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, pad_inches=0.1,
frameon=None, metadata=None,bbox_inches = 'tight')
print('Saving file:',fname)
# How many Zernike coeff. does that correspond to?
##################################################
nb_zernikes = 36
errors = np.linspace(0,nb_zernikes,nb_zernikes+1) # actually number of corrected zernike coefficients
nb_of_maps = len(errors)
print(nb_of_maps)
print(errors)
# nb_of_maps = 2
# errors = [1,36]
# nb_of_maps = 1
# errors = [1]
tab_wavefront_error, tab_error = generate_wavefront_errors_correction(nb_of_maps,errors,nb_zernikes)
print('Number of corrected Zernike coefficients:',tab_error)
print('Error in percent:',tab_wavefront_error)
# Save fits files
#################
#
if __name__ == "__main__":
from datetime import date
from astropy.io import fits
import os
import numpy as np
test_wavefront_errors()
today = date.today()
date = test_date = today.strftime("%Y%m%d")
date_ref = '20200308'
tests_directory = './Tests/'+test_date+'/'
if not os.path.exists(tests_directory):
os.makedirs(tests_directory)
directory = tests_directory+test_date+'_wavefront_errors/'
if not os.path.exists(directory):
os.makedirs(directory)
#filename = directory+date+test+'_Zernike_coefficients_estimation.fits'
#filename_ref = '../Reference_Tests/'+date_ref+'_wavefront_errors/'+date_ref+'_wavefront_errors.fits'
#hdul = fits.open(filename)
#hdul_ref = fits.open(filename)
#image = hdul[0].data
#image_ref = hdul[0].data
#diff1 = np.sum(image-image_ref)
#filename = directory+date+test+'_Zernike_coefficients_estimation.fits'
#filename_ref = '../Reference_Tests/'+date_ref+'_wavefront_errors/'+date_ref+'_psf_webbpsf.fits'
#hdul = fits.open(filename)
#hdul_ref = fits.open(filename)
#image = hdul[0].data
#image_ref = hdul[0].data
#diff1 = np.sum(image-image_ref)
#print(diff1)
#if np.sum([diff1,diff2]) == 0:
# print("Test 'Wavefront errors' passed")
#else:
# print("Test 'Wavefront errors' not passed")
| mit | -6,529,336,291,847,437,000 | 34.959083 | 134 | 0.621956 | false |
googleapis/python-spanner | tests/unit/test_snapshot.py | 1 | 53885 | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.api_core import gapic_v1
import mock
from google.cloud.spanner_v1 import RequestOptions
from tests._helpers import (
OpenTelemetryBase,
StatusCode,
HAS_OPENTELEMETRY_INSTALLED,
)
from google.cloud.spanner_v1.param_types import INT64
from google.api_core.retry import Retry
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
SQL_QUERY = """\
SELECT first_name, last_name, age FROM citizens ORDER BY age"""
SQL_QUERY_WITH_PARAM = """
SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age"""
PARAMS = {"max_age": 30}
PARAM_TYPES = {"max_age": INT64}
SQL_QUERY_WITH_BYTES_PARAM = """\
SELECT image_name FROM images WHERE @bytes IN image_data"""
PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"}
RESUME_TOKEN = b"DEADBEEF"
TXN_ID = b"DEAFBEAD"
SECONDS = 3
MICROS = 123456
BASE_ATTRIBUTES = {
"db.type": "spanner",
"db.url": "spanner.googleapis.com",
"db.instance": "testing",
"net.host.name": "spanner.googleapis.com",
}
class Test_restart_on_unavailable(OpenTelemetryBase):
def _call_fut(
self, restart, request, span_name=None, session=None, attributes=None
):
from google.cloud.spanner_v1.snapshot import _restart_on_unavailable
return _restart_on_unavailable(restart, request, span_name, session, attributes)
def _make_item(self, value, resume_token=b""):
return mock.Mock(
value=value, resume_token=resume_token, spec=["value", "resume_token"]
)
def test_iteration_w_empty_raw(self):
raw = _MockIterator()
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), [])
restart.assert_called_once_with(request=request)
self.assertNoSpans()
def test_iteration_w_non_empty_raw(self):
ITEMS = (self._make_item(0), self._make_item(1))
raw = _MockIterator(*ITEMS)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with(request=request)
self.assertNoSpans()
    def test_iteration_w_raw_w_resume_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
self._make_item(3),
)
raw = _MockIterator(*ITEMS)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with(request=request)
self.assertNoSpans()
def test_iteration_w_raw_raising_unavailable_no_token(self):
from google.api_core.exceptions import ServiceUnavailable
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
)
before = _MockIterator(fail_after=True, error=ServiceUnavailable("testing"))
after = _MockIterator(*ITEMS)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(ITEMS))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, b"")
self.assertNoSpans()
def test_iteration_w_raw_raising_retryable_internal_error_no_token(self):
from google.api_core.exceptions import InternalServerError
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
)
before = _MockIterator(
fail_after=True,
error=InternalServerError(
"Received unexpected EOS on DATA frame from server"
),
)
after = _MockIterator(*ITEMS)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(ITEMS))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, b"")
self.assertNoSpans()
def test_iteration_w_raw_raising_non_retryable_internal_error_no_token(self):
from google.api_core.exceptions import InternalServerError
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
)
before = _MockIterator(fail_after=True, error=InternalServerError("testing"))
after = _MockIterator(*ITEMS)
request = mock.Mock(spec=["resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
with self.assertRaises(InternalServerError):
list(resumable)
restart.assert_called_once_with(request=request)
self.assertNoSpans()
def test_iteration_w_raw_raising_unavailable(self):
from google.api_core.exceptions import ServiceUnavailable
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(
*(FIRST + SECOND), fail_after=True, error=ServiceUnavailable("testing")
)
after = _MockIterator(*LAST)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(FIRST + LAST))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, RESUME_TOKEN)
self.assertNoSpans()
def test_iteration_w_raw_raising_retryable_internal_error(self):
from google.api_core.exceptions import InternalServerError
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(
*(FIRST + SECOND),
fail_after=True,
error=InternalServerError(
"Received unexpected EOS on DATA frame from server"
)
)
after = _MockIterator(*LAST)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(FIRST + LAST))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, RESUME_TOKEN)
self.assertNoSpans()
def test_iteration_w_raw_raising_non_retryable_internal_error(self):
from google.api_core.exceptions import InternalServerError
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(
*(FIRST + SECOND), fail_after=True, error=InternalServerError("testing")
)
after = _MockIterator(*LAST)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
with self.assertRaises(InternalServerError):
list(resumable)
restart.assert_called_once_with(request=request)
self.assertNoSpans()
def test_iteration_w_raw_raising_unavailable_after_token(self):
from google.api_core.exceptions import ServiceUnavailable
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2), self._make_item(3))
before = _MockIterator(
*FIRST, fail_after=True, error=ServiceUnavailable("testing")
)
after = _MockIterator(*SECOND)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(FIRST + SECOND))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, RESUME_TOKEN)
self.assertNoSpans()
def test_iteration_w_raw_raising_retryable_internal_error_after_token(self):
from google.api_core.exceptions import InternalServerError
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2), self._make_item(3))
before = _MockIterator(
*FIRST,
fail_after=True,
error=InternalServerError(
"Received unexpected EOS on DATA frame from server"
)
)
after = _MockIterator(*SECOND)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
self.assertEqual(list(resumable), list(FIRST + SECOND))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, RESUME_TOKEN)
self.assertNoSpans()
def test_iteration_w_raw_raising_non_retryable_internal_error_after_token(self):
from google.api_core.exceptions import InternalServerError
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2), self._make_item(3))
before = _MockIterator(
*FIRST, fail_after=True, error=InternalServerError("testing")
)
after = _MockIterator(*SECOND)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart, request)
with self.assertRaises(InternalServerError):
list(resumable)
restart.assert_called_once_with(request=request)
self.assertNoSpans()
def test_iteration_w_span_creation(self):
name = "TestSpan"
extra_atts = {"test_att": 1}
raw = _MockIterator()
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(
restart, request, name, _Session(_Database()), extra_atts
)
self.assertEqual(list(resumable), [])
self.assertSpanAttributes(name, attributes=dict(BASE_ATTRIBUTES, test_att=1))
def test_iteration_w_multiple_span_creation(self):
from google.api_core.exceptions import ServiceUnavailable
if HAS_OPENTELEMETRY_INSTALLED:
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(
*(FIRST + SECOND), fail_after=True, error=ServiceUnavailable("testing")
)
after = _MockIterator(*LAST)
request = mock.Mock(test="test", spec=["test", "resume_token"])
restart = mock.Mock(spec=[], side_effect=[before, after])
name = "TestSpan"
resumable = self._call_fut(restart, request, name, _Session(_Database()))
self.assertEqual(list(resumable), list(FIRST + LAST))
self.assertEqual(len(restart.mock_calls), 2)
self.assertEqual(request.resume_token, RESUME_TOKEN)
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 2)
for span in span_list:
self.assertEqual(span.name, name)
self.assertEqual(
dict(span.attributes),
{
"db.type": "spanner",
"db.url": "spanner.googleapis.com",
"db.instance": "testing",
"net.host.name": "spanner.googleapis.com",
},
)
class Test_SnapshotBase(OpenTelemetryBase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import _SnapshotBase
return _SnapshotBase
def _make_one(self, session):
return self._getTargetClass()(session)
def _makeDerived(self, session):
class _Derived(self._getTargetClass()):
_transaction_id = None
_multi_use = False
def _make_txn_selector(self):
from google.cloud.spanner_v1 import (
TransactionOptions,
TransactionSelector,
)
if self._transaction_id:
return TransactionSelector(id=self._transaction_id)
options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if self._multi_use:
return TransactionSelector(begin=options)
return TransactionSelector(single_use=options)
return _Derived(session)
def _make_spanner_api(self):
from google.cloud.spanner_v1 import SpannerClient
return mock.create_autospec(SpannerClient, instance=True)
def test_ctor(self):
session = _Session()
base = self._make_one(session)
self.assertIs(base._session, session)
self.assertEqual(base._execute_sql_count, 0)
self.assertNoSpans()
def test__make_txn_selector_virtual(self):
session = _Session()
base = self._make_one(session)
with self.assertRaises(NotImplementedError):
base._make_txn_selector()
def test_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.streaming_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.read(TABLE_NAME, COLUMNS, keyset))
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
status=StatusCode.ERROR,
attributes=dict(
BASE_ATTRIBUTES, table_id=TABLE_NAME, columns=tuple(COLUMNS)
),
)
def _read_helper(
self,
multi_use,
first=True,
count=0,
partition=None,
timeout=gapic_v1.method.DEFAULT,
retry=gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1 import ReadRequest
from google.cloud.spanner_v1 import Type, StructType
from google.cloud.spanner_v1 import TypeCode
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1._helpers import _make_value_pb
VALUES = [[u"bharney", 31], [u"phred", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
struct_type_pb = StructType(
fields=[
StructType.Field(name="name", type_=Type(code=TypeCode.STRING)),
StructType.Field(name="age", type_=Type(code=TypeCode.INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(metadata=metadata_pb),
PartialResultSet(stats=stats_pb),
]
for i in range(len(result_sets)):
result_sets[i].values.extend(VALUE_PBS[i])
KEYS = [["[email protected]"], ["[email protected]"]]
keyset = KeySet(keys=KEYS)
INDEX = "email-address-index"
LIMIT = 20
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.streaming_read.return_value = _MockIterator(*result_sets)
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
if not first:
derived._transaction_id = TXN_ID
if partition is not None: # 'limit' and 'partition' incompatible
result_set = derived.read(
TABLE_NAME,
COLUMNS,
keyset,
index=INDEX,
partition=partition,
retry=retry,
timeout=timeout,
)
else:
result_set = derived.read(
TABLE_NAME,
COLUMNS,
keyset,
index=INDEX,
limit=LIMIT,
retry=retry,
timeout=timeout,
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
if partition is not None:
expected_limit = 0
else:
expected_limit = LIMIT
expected_request = ReadRequest(
session=self.SESSION_NAME,
table=TABLE_NAME,
columns=COLUMNS,
key_set=keyset._to_pb(),
transaction=expected_transaction,
index=INDEX,
limit=expected_limit,
partition_token=partition,
)
api.streaming_read.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
retry=retry,
timeout=timeout,
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES, table_id=TABLE_NAME, columns=tuple(COLUMNS)
),
)
def test_read_wo_multi_use(self):
self._read_helper(multi_use=False)
def test_read_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=False, count=1)
def test_read_w_multi_use_wo_first(self):
self._read_helper(multi_use=True, first=False)
def test_read_w_multi_use_wo_first_w_count_gt_0(self):
self._read_helper(multi_use=True, first=False, count=1)
def test_read_w_multi_use_w_first_w_partition(self):
PARTITION = b"FADEABED"
self._read_helper(multi_use=True, first=True, partition=PARTITION)
def test_read_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=True, first=True, count=1)
def test_read_w_timeout_param(self):
self._read_helper(multi_use=True, first=False, timeout=2.0)
def test_read_w_retry_param(self):
self._read_helper(multi_use=True, first=False, retry=Retry(deadline=60))
def test_read_w_timeout_and_retry_params(self):
self._read_helper(
multi_use=True, first=False, retry=Retry(deadline=60), timeout=2.0
)
def test_execute_sql_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.execute_streaming_sql.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.execute_sql(SQL_QUERY))
self.assertEqual(derived._execute_sql_count, 1)
self.assertSpanAttributes(
"CloudSpanner.ReadWriteTransaction",
status=StatusCode.ERROR,
attributes=dict(BASE_ATTRIBUTES, **{"db.statement": SQL_QUERY}),
)
def test_execute_sql_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(ValueError):
derived.execute_sql(SQL_QUERY_WITH_PARAM, PARAMS)
self.assertNoSpans()
def _execute_sql_helper(
self,
multi_use,
first=True,
count=0,
partition=None,
sql_count=0,
query_options=None,
request_options=None,
timeout=gapic_v1.method.DEFAULT,
retry=gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1 import ExecuteSqlRequest
from google.cloud.spanner_v1 import Type, StructType
from google.cloud.spanner_v1 import TypeCode
from google.cloud.spanner_v1._helpers import (
_make_value_pb,
_merge_query_options,
)
VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
MODE = 2 # PROFILE
struct_type_pb = StructType(
fields=[
StructType.Field(name="first_name", type_=Type(code=TypeCode.STRING)),
StructType.Field(name="last_name", type_=Type(code=TypeCode.STRING)),
StructType.Field(name="age", type_=Type(code=TypeCode.INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(metadata=metadata_pb),
PartialResultSet(stats=stats_pb),
]
for i in range(len(result_sets)):
result_sets[i].values.extend(VALUE_PBS[i])
iterator = _MockIterator(*result_sets)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.execute_streaming_sql.return_value = iterator
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
derived._execute_sql_count = sql_count
if not first:
derived._transaction_id = TXN_ID
result_set = derived.execute_sql(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
query_mode=MODE,
query_options=query_options,
request_options=request_options,
partition=partition,
retry=retry,
timeout=timeout,
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_query_options = database._instance._client._query_options
if query_options:
expected_query_options = _merge_query_options(
expected_query_options, query_options
)
expected_request = ExecuteSqlRequest(
session=self.SESSION_NAME,
sql=SQL_QUERY_WITH_PARAM,
transaction=expected_transaction,
params=expected_params,
param_types=PARAM_TYPES,
query_mode=MODE,
query_options=expected_query_options,
request_options=request_options,
partition_token=partition,
seqno=sql_count,
)
api.execute_streaming_sql.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
timeout=timeout,
retry=retry,
)
self.assertEqual(derived._execute_sql_count, sql_count + 1)
self.assertSpanAttributes(
"CloudSpanner.ReadWriteTransaction",
status=StatusCode.OK,
attributes=dict(BASE_ATTRIBUTES, **{"db.statement": SQL_QUERY_WITH_PARAM}),
)
def test_execute_sql_wo_multi_use(self):
self._execute_sql_helper(multi_use=False)
def test_execute_sql_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=False, count=1)
def test_execute_sql_w_multi_use_wo_first(self):
self._execute_sql_helper(multi_use=True, first=False, sql_count=1)
def test_execute_sql_w_multi_use_wo_first_w_count_gt_0(self):
self._execute_sql_helper(multi_use=True, first=False, count=1)
def test_execute_sql_w_multi_use_w_first(self):
self._execute_sql_helper(multi_use=True, first=True)
def test_execute_sql_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=True, first=True, count=1)
def test_execute_sql_w_retry(self):
self._execute_sql_helper(multi_use=False, retry=None)
def test_execute_sql_w_timeout(self):
self._execute_sql_helper(multi_use=False, timeout=None)
def test_execute_sql_w_query_options(self):
from google.cloud.spanner_v1 import ExecuteSqlRequest
self._execute_sql_helper(
multi_use=False,
query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"),
)
def test_execute_sql_w_request_options(self):
self._execute_sql_helper(
multi_use=False,
request_options=RequestOptions(
priority=RequestOptions.Priority.PRIORITY_MEDIUM
),
)
def _partition_read_helper(
self,
multi_use,
w_txn,
size=None,
max_partitions=None,
index=None,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1 import Partition
from google.cloud.spanner_v1 import PartitionOptions
from google.cloud.spanner_v1 import PartitionReadRequest
from google.cloud.spanner_v1 import PartitionResponse
from google.cloud.spanner_v1 import Transaction
from google.cloud.spanner_v1 import TransactionSelector
keyset = KeySet(all_=True)
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_read.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_read(
TABLE_NAME,
COLUMNS,
keyset,
index=index,
partition_size_bytes=size,
max_partitions=max_partitions,
retry=retry,
timeout=timeout,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
expected_request = PartitionReadRequest(
session=self.SESSION_NAME,
table=TABLE_NAME,
columns=COLUMNS,
key_set=keyset._to_pb(),
transaction=expected_txn_selector,
index=index,
partition_options=expected_partition_options,
)
api.partition_read.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
retry=retry,
timeout=timeout,
)
self.assertSpanAttributes(
"CloudSpanner.PartitionReadOnlyTransaction",
status=StatusCode.OK,
attributes=dict(
BASE_ATTRIBUTES, table_id=TABLE_NAME, columns=tuple(COLUMNS)
),
)
def test_partition_read_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=False, w_txn=True)
def test_partition_read_wo_existing_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=True, w_txn=False)
def test_partition_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_read(TABLE_NAME, COLUMNS, keyset))
self.assertSpanAttributes(
"CloudSpanner.PartitionReadOnlyTransaction",
status=StatusCode.ERROR,
attributes=dict(
BASE_ATTRIBUTES, table_id=TABLE_NAME, columns=tuple(COLUMNS)
),
)
def test_partition_read_ok_w_index_no_options(self):
self._partition_read_helper(multi_use=True, w_txn=True, index="index")
def test_partition_read_ok_w_size(self):
self._partition_read_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_read_ok_w_max_partitions(self):
self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4)
def test_partition_read_ok_w_timeout_param(self):
self._partition_read_helper(multi_use=True, w_txn=True, timeout=2.0)
def test_partition_read_ok_w_retry_param(self):
self._partition_read_helper(
multi_use=True, w_txn=True, retry=Retry(deadline=60)
)
def test_partition_read_ok_w_timeout_and_retry_params(self):
self._partition_read_helper(
multi_use=True, w_txn=True, retry=Retry(deadline=60), timeout=2.0
)
def _partition_query_helper(
self,
multi_use,
w_txn,
size=None,
max_partitions=None,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1 import Partition
from google.cloud.spanner_v1 import PartitionOptions
from google.cloud.spanner_v1 import PartitionQueryRequest
from google.cloud.spanner_v1 import PartitionResponse
from google.cloud.spanner_v1 import Transaction
from google.cloud.spanner_v1 import TransactionSelector
from google.cloud.spanner_v1._helpers import _make_value_pb
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_query.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_query(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
partition_size_bytes=size,
max_partitions=max_partitions,
retry=retry,
timeout=timeout,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
expected_request = PartitionQueryRequest(
session=self.SESSION_NAME,
sql=SQL_QUERY_WITH_PARAM,
transaction=expected_txn_selector,
params=expected_params,
param_types=PARAM_TYPES,
partition_options=expected_partition_options,
)
api.partition_query.assert_called_once_with(
request=expected_request,
metadata=[("google-cloud-resource-prefix", database.name)],
retry=retry,
timeout=timeout,
)
self.assertSpanAttributes(
"CloudSpanner.PartitionReadWriteTransaction",
status=StatusCode.OK,
attributes=dict(BASE_ATTRIBUTES, **{"db.statement": SQL_QUERY_WITH_PARAM}),
)
def test_partition_query_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_query.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_query(SQL_QUERY))
self.assertSpanAttributes(
"CloudSpanner.PartitionReadWriteTransaction",
status=StatusCode.ERROR,
attributes=dict(BASE_ATTRIBUTES, **{"db.statement": SQL_QUERY}),
)
def test_partition_query_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(ValueError):
list(derived.partition_query(SQL_QUERY_WITH_PARAM, PARAMS))
self.assertNoSpans()
def test_partition_query_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=False, w_txn=True)
def test_partition_query_wo_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=True, w_txn=False)
def test_partition_query_ok_w_index_no_options(self):
self._partition_query_helper(multi_use=True, w_txn=True)
def test_partition_query_ok_w_size(self):
self._partition_query_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_query_ok_w_max_partitions(self):
self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4)
def test_partition_query_ok_w_timeout_param(self):
self._partition_query_helper(multi_use=True, w_txn=True, timeout=2.0)
def test_partition_query_ok_w_retry_param(self):
self._partition_query_helper(
multi_use=True, w_txn=True, retry=Retry(deadline=30)
)
def test_partition_query_ok_w_timeout_and_retry_params(self):
self._partition_query_helper(
multi_use=True, w_txn=True, retry=Retry(deadline=60), timeout=2.0
)
class TestSnapshot(OpenTelemetryBase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import Snapshot
return Snapshot
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _make_spanner_api(self):
from google.cloud.spanner_v1 import SpannerClient
return mock.create_autospec(SpannerClient, instance=True)
def _makeTimestamp(self):
import datetime
from google.cloud._helpers import UTC
return datetime.datetime.utcnow().replace(tzinfo=UTC)
def _makeDuration(self, seconds=1, microseconds=0):
import datetime
return datetime.timedelta(seconds=seconds, microseconds=microseconds)
def test_ctor_defaults(self):
session = _Session()
snapshot = self._make_one(session)
self.assertIs(snapshot._session, session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multiple_options(self):
timestamp = self._makeTimestamp()
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, read_timestamp=timestamp, max_staleness=duration)
def test_ctor_w_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertEqual(snapshot._min_read_timestamp, timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_max_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertEqual(snapshot._max_staleness, duration)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, min_read_timestamp=timestamp, multi_use=True)
def test_ctor_w_multi_use_and_max_staleness(self):
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, max_staleness=duration, multi_use=True)
def test_ctor_w_multi_use_and_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertTrue(snapshot._multi_use)
def test__make_txn_selector_w_transaction_id(self):
session = _Session()
snapshot = self._make_one(session)
snapshot._transaction_id = TXN_ID
selector = snapshot._make_txn_selector()
self.assertEqual(selector.id, TXN_ID)
def test__make_txn_selector_strong(self):
session = _Session()
snapshot = self._make_one(session)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(
type(options).pb(options).read_only.read_timestamp
),
timestamp,
)
def test__make_txn_selector_w_min_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(
type(options).pb(options).read_only.min_read_timestamp
),
timestamp,
)
def test__make_txn_selector_w_max_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(type(options).pb(options).read_only.max_staleness.seconds, 3)
self.assertEqual(
type(options).pb(options).read_only.max_staleness.nanos, 123456000
)
def test__make_txn_selector_w_exact_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(type(options).pb(options).read_only.exact_staleness.seconds, 3)
self.assertEqual(
type(options).pb(options).read_only.exact_staleness.nanos, 123456000
)
def test__make_txn_selector_strong_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp_w_multi_use(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(
_pb_timestamp_to_datetime(
type(options).pb(options).read_only.read_timestamp
),
timestamp,
)
def test__make_txn_selector_w_exact_staleness_w_multi_use(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(type(options).pb(options).read_only.exact_staleness.seconds, 3)
self.assertEqual(
type(options).pb(options).read_only.exact_staleness.nanos, 123456000
)
def test_begin_wo_multi_use(self):
session = _Session()
snapshot = self._make_one(session)
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_read_request_count_gt_0(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._read_request_count = 1
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_existing_txn_id(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._transaction_id = TXN_ID
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.begin_transaction.side_effect = RuntimeError()
timestamp = self._makeTimestamp()
session = _Session(database)
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
with self.assertRaises(RuntimeError):
snapshot.begin()
self.assertSpanAttributes(
"CloudSpanner.BeginTransaction",
status=StatusCode.ERROR,
attributes=BASE_ATTRIBUTES,
)
def test_begin_ok_exact_staleness(self):
from google.protobuf.duration_pb2 import Duration
from google.cloud.spanner_v1 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
duration = self._makeDuration(seconds=SECONDS, microseconds=MICROS)
session = _Session(database)
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration)
)
api.begin_transaction.assert_called_once_with(
session=session.name,
options=expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
self.assertSpanAttributes(
"CloudSpanner.BeginTransaction",
status=StatusCode.OK,
attributes=BASE_ATTRIBUTES,
)
def test_begin_ok_exact_strong(self):
from google.cloud.spanner_v1 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
session = _Session(database)
snapshot = self._make_one(session, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
api.begin_transaction.assert_called_once_with(
session=session.name,
options=expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
self.assertSpanAttributes(
"CloudSpanner.BeginTransaction",
status=StatusCode.OK,
attributes=BASE_ATTRIBUTES,
)
class _Client(object):
def __init__(self):
from google.cloud.spanner_v1 import ExecuteSqlRequest
self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1")
class _Instance(object):
def __init__(self):
self._client = _Client()
class _Database(object):
def __init__(self):
self.name = "testing"
self._instance = _Instance()
class _Session(object):
def __init__(self, database=None, name=TestSnapshot.SESSION_NAME):
self._database = database
self.name = name
class _MockIterator(object):
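    # Minimal iterator test double: yields the supplied values in order and,
    # when fail_after is set, raises `error` instead of StopIteration.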
def __init__(self, *values, **kw):
self._iter_values = iter(values)
self._fail_after = kw.pop("fail_after", False)
self._error = kw.pop("error", Exception)
def __iter__(self):
return self
def __next__(self):
try:
return next(self._iter_values)
except StopIteration:
if self._fail_after:
raise self._error
raise
next = __next__
| apache-2.0 | 6,984,511,810,110,172,000 | 36.394171 | 88 | 0.617018 | false |
Fuzion24/mitmproxy | libmproxy/console/flowview.py | 1 | 22322 | from __future__ import absolute_import
import os
import sys
import urwid
from netlib import odict
from . import common, grideditor, contentview, signals, searchable, tabs
from . import flowdetailview
from .. import utils, controller
from ..protocol.http import HTTPRequest, HTTPResponse, CONTENT_MISSING, decoded
class SearchError(Exception):
pass
def _mkhelp():
text = []
keys = [
("A", "accept all intercepted flows"),
("a", "accept this intercepted flow"),
("b", "save request/response body"),
("Z", "copy as curl command"),
("d", "delete flow"),
("D", "duplicate flow"),
("e", "edit request/response"),
("f", "load full body data"),
("m", "change body display mode for this entity"),
(None,
common.highlight_key("automatic", "a") +
[("text", ": automatic detection")]
),
(None,
common.highlight_key("hex", "e") +
[("text", ": Hex")]
),
(None,
common.highlight_key("html", "h") +
[("text", ": HTML")]
),
(None,
common.highlight_key("image", "i") +
[("text", ": Image")]
),
(None,
common.highlight_key("javascript", "j") +
[("text", ": JavaScript")]
),
(None,
common.highlight_key("json", "s") +
[("text", ": JSON")]
),
(None,
common.highlight_key("urlencoded", "u") +
[("text", ": URL-encoded data")]
),
(None,
common.highlight_key("raw", "r") +
[("text", ": raw data")]
),
(None,
common.highlight_key("xml", "x") +
[("text", ": XML")]
),
("M", "change default body display mode"),
("p", "previous flow"),
("P", "copy response(content/headers) to clipboard"),
("r", "replay request"),
("V", "revert changes to request"),
("v", "view body in external viewer"),
("w", "save all flows matching current limit"),
("W", "save this flow"),
("x", "delete body"),
("z", "encode/decode a request/response"),
("tab", "next tab"),
("h, l", "previous tab, next tab"),
("space", "next flow"),
("|", "run script on this flow"),
("/", "search (case sensitive)"),
("n", "repeat search forward"),
("N", "repeat search backwards"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
footer = [
('heading_key', "?"), ":help ",
('heading_key', "q"), ":back ",
]
class FlowViewHeader(urwid.WidgetWrap):
def __init__(self, master, f):
self.master, self.flow = master, f
self._w = common.format_flow(
f,
False,
extended=True,
padding=0,
hostheader=self.master.showhost
)
signals.flow_change.connect(self.sig_flow_change)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self._w = common.format_flow(
flow,
False,
extended=True,
padding=0,
hostheader=self.master.showhost
)
cache = utils.LRUCache(200)
TAB_REQ = 0
TAB_RESP = 1
class FlowView(tabs.Tabs):
highlight_color = "focusfield"
def __init__(self, master, state, flow, tab_offset):
self.master, self.state, self.flow = master, state, flow
tabs.Tabs.__init__(self,
[
(self.tab_request, self.view_request),
(self.tab_response, self.view_response),
(self.tab_details, self.view_details),
],
tab_offset
)
self.show()
self.last_displayed_body = None
signals.flow_change.connect(self.sig_flow_change)
def tab_request(self):
if self.flow.intercepted and not self.flow.reply.acked and not self.flow.response:
return "Request intercepted"
else:
return "Request"
def tab_response(self):
if self.flow.intercepted and not self.flow.reply.acked and self.flow.response:
return "Response intercepted"
else:
return "Response"
def tab_details(self):
return "Detail"
def view_request(self):
return self.conn_text(self.flow.request)
def view_response(self):
return self.conn_text(self.flow.response)
def view_details(self):
return flowdetailview.flowdetails(self.state, self.flow)
def sig_flow_change(self, sender, flow):
if flow == self.flow:
self.show()
def content_view(self, viewmode, conn):
if conn.content == CONTENT_MISSING:
msg, body = "", [urwid.Text([("error", "[content missing]")])]
return (msg, body)
else:
full = self.state.get_flow_setting(
self.flow,
(self.tab_offset, "fullcontents"),
False
)
if full:
limit = sys.maxsize
else:
limit = contentview.VIEW_CUTOFF
description, text_objects = cache.get(
contentview.get_content_view,
viewmode,
tuple(tuple(i) for i in conn.headers.lst),
conn.content,
limit,
isinstance(conn, HTTPRequest)
)
return (description, text_objects)
def viewmode_get(self):
override = self.state.get_flow_setting(
self.flow,
(self.tab_offset, "prettyview")
)
return self.state.default_body_view if override is None else override
def conn_text(self, conn):
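        # Render one request/response: header key/value pairs followed by the
        # body in the currently selected content view mode; if there is no
        # response yet, show a hint on how to add one.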
if conn:
txt = common.format_keyvals(
[(h + ":", v) for (h, v) in conn.headers.lst],
key = "header",
val = "text"
)
viewmode = self.viewmode_get()
msg, body = self.content_view(viewmode, conn)
cols = [
urwid.Text(
[
("heading", msg),
]
)
]
cols.append(
urwid.Text(
[
" ",
('heading', "["),
('heading_key', "m"),
('heading', (":%s]" % viewmode.name)),
],
align="right"
)
)
title = urwid.AttrWrap(urwid.Columns(cols), "heading")
txt.append(title)
txt.extend(body)
else:
txt = [
urwid.Text(""),
urwid.Text(
[
("highlight", "No response. Press "),
("key", "e"),
("highlight", " and edit any aspect to add one."),
]
)
]
return searchable.Searchable(self.state, txt)
def set_method_raw(self, m):
if m:
self.flow.request.method = m
signals.flow_change.send(self, flow = self.flow)
def edit_method(self, m):
if m == "e":
signals.status_prompt.send(
prompt = "Method",
text = self.flow.request.method,
callback = self.set_method_raw
)
else:
for i in common.METHOD_OPTIONS:
if i[1] == m:
self.flow.request.method = i[0].upper()
signals.flow_change.send(self, flow = self.flow)
def set_url(self, url):
request = self.flow.request
try:
request.url = str(url)
except ValueError:
return "Invalid URL."
signals.flow_change.send(self, flow = self.flow)
def set_resp_code(self, code):
response = self.flow.response
try:
response.code = int(code)
except ValueError:
return None
import BaseHTTPServer
if int(code) in BaseHTTPServer.BaseHTTPRequestHandler.responses:
response.msg = BaseHTTPServer.BaseHTTPRequestHandler.responses[
int(code)][0]
signals.flow_change.send(self, flow = self.flow)
def set_resp_msg(self, msg):
response = self.flow.response
response.msg = msg
signals.flow_change.send(self, flow = self.flow)
def set_headers(self, lst, conn):
conn.headers = odict.ODictCaseless(lst)
signals.flow_change.send(self, flow = self.flow)
def set_query(self, lst, conn):
conn.set_query(odict.ODict(lst))
signals.flow_change.send(self, flow = self.flow)
def set_path_components(self, lst, conn):
conn.set_path_components(lst)
signals.flow_change.send(self, flow = self.flow)
def set_form(self, lst, conn):
conn.set_form_urlencoded(odict.ODict(lst))
signals.flow_change.send(self, flow = self.flow)
def edit_form(self, conn):
self.master.view_grideditor(
grideditor.URLEncodedFormEditor(
self.master,
conn.get_form_urlencoded().lst,
self.set_form,
conn
)
)
def edit_form_confirm(self, key, conn):
if key == "y":
self.edit_form(conn)
def set_cookies(self, lst, conn):
od = odict.ODict(lst)
conn.set_cookies(od)
signals.flow_change.send(self, flow = self.flow)
def set_setcookies(self, data, conn):
conn.set_cookies(data)
signals.flow_change.send(self, flow = self.flow)
def edit(self, part):
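        # `part` is the single-key code chosen in the edit prompt:
        # c=cookies, q=query, p=path, u=url, h=headers, f=form, r=raw body,
        # m=method (request) or message (response), o=response code.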
if self.tab_offset == TAB_REQ:
message = self.flow.request
else:
if not self.flow.response:
self.flow.response = HTTPResponse(
self.flow.request.httpversion,
200, "OK", odict.ODictCaseless(), ""
)
self.flow.response.reply = controller.DummyReply()
message = self.flow.response
self.flow.backup()
if message == self.flow.request and part == "c":
self.master.view_grideditor(
grideditor.CookieEditor(
self.master,
message.get_cookies().lst,
self.set_cookies,
message
)
)
if message == self.flow.response and part == "c":
self.master.view_grideditor(
grideditor.SetCookieEditor(
self.master,
message.get_cookies(),
self.set_setcookies,
message
)
)
if part == "r":
with decoded(message):
# Fix an issue caused by some editors when editing a
# request/response body. Many editors make it hard to save a
# file without a terminating newline on the last line. When
# editing message bodies, this can cause problems. For now, I
# just strip the newlines off the end of the body when we return
# from an editor.
c = self.master.spawn_editor(message.content or "")
message.content = c.rstrip("\n")
elif part == "f":
if not message.get_form_urlencoded() and message.content:
signals.status_prompt_onekey.send(
prompt = "Existing body is not a URL-encoded form. Clear and edit?",
keys = [
("yes", "y"),
("no", "n"),
],
callback = self.edit_form_confirm,
args = (message,)
)
else:
self.edit_form(message)
elif part == "h":
self.master.view_grideditor(
grideditor.HeaderEditor(
self.master,
message.headers.lst,
self.set_headers,
message
)
)
elif part == "p":
p = message.get_path_components()
self.master.view_grideditor(
grideditor.PathEditor(
self.master,
p,
self.set_path_components,
message
)
)
elif part == "q":
self.master.view_grideditor(
grideditor.QueryEditor(
self.master,
message.get_query().lst,
self.set_query, message
)
)
elif part == "u":
signals.status_prompt.send(
prompt = "URL",
text = message.url,
callback = self.set_url
)
elif part == "m":
signals.status_prompt_onekey.send(
prompt = "Method",
keys = common.METHOD_OPTIONS,
callback = self.edit_method
)
elif part == "o":
signals.status_prompt.send(
prompt = "Code",
text = str(message.code),
callback = self.set_resp_code
)
elif part == "m":
signals.status_prompt.send(
prompt = "Message",
text = message.msg,
callback = self.set_resp_msg
)
signals.flow_change.send(self, flow = self.flow)
def _view_nextprev_flow(self, np, flow):
try:
idx = self.state.view.index(flow)
except IndexError:
return
if np == "next":
new_flow, new_idx = self.state.get_next(idx)
else:
new_flow, new_idx = self.state.get_prev(idx)
if new_flow is None:
signals.status_message.send(message="No more flows!")
else:
signals.pop_view_state.send(self)
self.master.view_flow(new_flow, self.tab_offset)
def view_next_flow(self, flow):
return self._view_nextprev_flow("next", flow)
def view_prev_flow(self, flow):
return self._view_nextprev_flow("prev", flow)
def change_this_display_mode(self, t):
self.state.add_flow_setting(
self.flow,
(self.tab_offset, "prettyview"),
contentview.get_by_shortcut(t)
)
signals.flow_change.send(self, flow = self.flow)
def delete_body(self, t):
if t == "m":
val = CONTENT_MISSING
else:
val = None
if self.tab_offset == TAB_REQ:
self.flow.request.content = val
else:
self.flow.response.content = val
signals.flow_change.send(self, flow = self.flow)
def keypress(self, size, key):
key = super(self.__class__, self).keypress(size, key)
if key == " ":
self.view_next_flow(self.flow)
return
key = common.shortcuts(key)
if self.tab_offset == TAB_REQ:
conn = self.flow.request
elif self.tab_offset == TAB_RESP:
conn = self.flow.response
else:
conn = None
if key in ("up", "down", "page up", "page down"):
# Why doesn't this just work??
self._w.keypress(size, key)
elif key == "a":
self.flow.accept_intercept(self.master)
self.master.view_flow(self.flow)
elif key == "A":
self.master.accept_all()
self.master.view_flow(self.flow)
elif key == "d":
if self.state.flow_count() == 1:
self.master.view_flowlist()
elif self.state.view.index(self.flow) == len(self.state.view) - 1:
self.view_prev_flow(self.flow)
else:
self.view_next_flow(self.flow)
f = self.flow
f.kill(self.master)
self.state.delete_flow(f)
elif key == "D":
f = self.master.duplicate_flow(self.flow)
self.master.view_flow(f)
signals.status_message.send(message="Duplicated.")
elif key == "p":
self.view_prev_flow(self.flow)
elif key == "r":
r = self.master.replay_request(self.flow)
if r:
signals.status_message.send(message=r)
signals.flow_change.send(self, flow = self.flow)
elif key == "V":
if not self.flow.modified():
signals.status_message.send(message="Flow not modified.")
return
self.state.revert(self.flow)
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="Reverted.")
elif key == "W":
signals.status_prompt_path.send(
prompt = "Save this flow",
callback = self.master.save_one_flow,
args = (self.flow,)
)
elif key == "Z":
common.copy_as_curl_command(self.flow)
elif key == "|":
signals.status_prompt_path.send(
prompt = "Send flow to script",
callback = self.master.run_script_once,
args = (self.flow,)
)
if not conn and key in set(list("befgmxvz")):
signals.status_message.send(
message = "Tab to the request or response",
expire = 1
)
elif conn:
if key == "b":
if self.tab_offset == TAB_REQ:
common.ask_save_body(
"q", self.master, self.state, self.flow
)
else:
common.ask_save_body(
"s", self.master, self.state, self.flow
)
elif key == "e":
if self.tab_offset == TAB_REQ:
signals.status_prompt_onekey.send(
prompt = "Edit request",
keys = (
("cookies", "c"),
("query", "q"),
("path", "p"),
("url", "u"),
("header", "h"),
("form", "f"),
("raw body", "r"),
("method", "m"),
),
callback = self.edit
)
else:
signals.status_prompt_onekey.send(
prompt = "Edit response",
keys = (
("cookies", "c"),
("code", "o"),
("message", "m"),
("header", "h"),
("raw body", "r"),
),
callback = self.edit
)
key = None
elif key == "f":
signals.status_message.send(message="Loading all body data...")
self.state.add_flow_setting(
self.flow,
(self.tab_offset, "fullcontents"),
True
)
signals.flow_change.send(self, flow = self.flow)
signals.status_message.send(message="")
elif key == "P":
if self.tab_offset == TAB_REQ:
scope = "q"
else:
scope = "s"
common.ask_copy_part(scope, self.flow, self.master, self.state)
elif key == "m":
p = list(contentview.view_prompts)
p.insert(0, ("Clear", "C"))
signals.status_prompt_onekey.send(
self,
prompt = "Display mode",
keys = p,
callback = self.change_this_display_mode
)
key = None
elif key == "x":
signals.status_prompt_onekey.send(
prompt = "Delete body",
keys = (
("completely", "c"),
("mark as missing", "m"),
),
callback = self.delete_body
)
key = None
elif key == "v":
if conn.content:
t = conn.headers["content-type"] or [None]
t = t[0]
if "EDITOR" in os.environ or "PAGER" in os.environ:
self.master.spawn_external_viewer(conn.content, t)
else:
signals.status_message.send(
message = "Error! Set $EDITOR or $PAGER."
)
elif key == "z":
self.flow.backup()
e = conn.headers.get_first("content-encoding", "identity")
if e != "identity":
if not conn.decode():
signals.status_message.send(
message = "Could not decode - invalid data?"
)
else:
signals.status_prompt_onekey.send(
prompt = "Select encoding: ",
keys = (
("gzip", "z"),
("deflate", "d"),
),
callback = self.encode_callback,
args = (conn,)
)
signals.flow_change.send(self, flow = self.flow)
return key
def encode_callback(self, key, conn):
encoding_map = {
"z": "gzip",
"d": "deflate",
}
conn.encode(encoding_map[key])
signals.flow_change.send(self, flow = self.flow)
| mit | 8,710,828,440,068,676,000 | 33.131498 | 90 | 0.457307 | false |
sternshus/arelle2.7 | svr-2.7/arelle/ValidateVersReport.py | 1 | 44336 | u'''
Created on Nov 9, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import ModelVersObject, XbrlConst, ValidateXbrl, ModelDocument
from arelle.ModelValue import qname
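# Maps each custom-attribute versioning event to the event attribute(s) that
# must carry the QName of the custom attribute being added, deleted or changed.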
conceptAttributeEventAttributes = {
u"conceptAttributeDelete": (u"fromCustomAttribute",),
u"conceptAttributeAdd": (u"toCustomAttribute",),
u"conceptAttributeChange": (u"fromCustomAttribute",u"toCustomAttribute"),
u"conceptAttributeChange": (u"fromCustomAttribute",u"toCustomAttribute"),
u"attributeDefinitionChange": (u"fromCustomAttribute",u"toCustomAttribute"),
}
schemaAttributeEventAttributes = {
u"conceptIDChange": u"id",
u"conceptTypeChange": u"type",
u"conceptSubstitutionGroupChange": u"substitutionGroup",
u"conceptNillableChange": u"nillable",
u"conceptAbstractChange": u"abstract",
u"conceptBlockChange": u"block",
u"conceptDefaultChange": u"default",
u"conceptFixedChange": u"fixed",
u"conceptFinalChange": u"final"
}
class ValidateVersReport():
def __init__(self, testModelXbrl):
self.testModelXbrl = testModelXbrl # testcase or controlling validation object
def close(self):
self.__dict__.clear() # dereference everything
def validate(self, modelVersReport):
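        """Validate one versioning report: both referenced DTSes, the report's
        own linkbases, and its assignment, namespace-rename, role-change,
        concept, relationship-set and instance-aspect events."""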
self.modelVersReport = modelVersReport
versReport = modelVersReport.modelDocument
if not hasattr(versReport, u"xmlDocument"): # not parsed
return
for DTSname in (u"fromDTS", u"toDTS"):
DTSmodelXbrl = getattr(versReport, DTSname)
if DTSmodelXbrl is None or DTSmodelXbrl.modelDocument is None:
self.modelVersReport.error(u"vere:invalidDTSIdentifier",
_(u"%(dts)s is missing or not loaded"),
modelObject=self, dts=DTSname)
else:
# validate DTS
ValidateXbrl.ValidateXbrl(DTSmodelXbrl).validate(DTSmodelXbrl)
if len(DTSmodelXbrl.errors) > 0:
self.modelVersReport.error(u"vere:invalidDTSIdentifier",
_(u"%(dts) has errors: %(error)s"),
modelObject=DTSmodelXbrl.modelDocument, dts=DTSname, error=DTSmodelXbrl.errors)
# validate linkbases
ValidateXbrl.ValidateXbrl(self.modelVersReport).validate(modelVersReport)
versReportElt = versReport.xmlRootElement
# check actions
for assignmentRef in versReportElt.iterdescendants(tag=u"{http://xbrl.org/2010/versioning-base}assignmentRef"):
ref = assignmentRef.get(u"ref")
if ref not in versReport.idObjects or \
not isinstance(versReport.idObjects[ref], ModelVersObject.ModelAssignment):
self.modelVersReport.error(u"vere:invalidAssignmentRef",
_(u"AssignmentRef %(assignmentRef)s does not reference an assignment"),
modelObject=assignmentRef, assignmentRef=ref)
# check namespace renames
for NSrename in versReport.namespaceRenameFrom.values():
if NSrename.fromURI not in versReport.fromDTS.namespaceDocs:
self.modelVersReport.error(u"vere:invalidNamespaceMapping",
_(u"NamespaceRename fromURI %(uri)s does not reference a schema in fromDTS"),
modelObject=self, uri=NSrename.fromURI)
if NSrename.toURI not in versReport.toDTS.namespaceDocs:
self.modelVersReport.error(u"vere:invalidNamespaceMapping",
_(u"NamespaceRename toURI %(uri)s does not reference a schema in toDTS"),
modelObject=self, uri=NSrename.toURI)
# check role changes
for roleChange in versReport.roleChanges.values():
if roleChange.fromURI not in versReport.fromDTS.roleTypes:
self.modelVersReport.error(u"vere:invalidRoleChange",
_(u"RoleChange fromURI %(uri)s does not reference a roleType in fromDTS"),
modelObject=self, uri=roleChange.fromURI)
if roleChange.toURI not in versReport.toDTS.roleTypes:
self.modelVersReport.error(u"vere:invalidRoleChange",
_(u"RoleChange toURI %(uri)s does not reference a roleType in toDTS"),
modelObject=self, uri=roleChange.toURI)
# check reportRefs
# check actions
for reportRef in versReportElt.iterdescendants(tag=u"{http://xbrl.org/2010/versioning-base}reportRef"):
# if existing it must be valid
href = reportRef.get(u"{http://www.w3.org/1999/xlink}href")
# TBD
if versReport.fromDTS and versReport.toDTS:
# check concept changes of concept basic
for conceptChange in versReport.conceptUseChanges:
fromConceptQn = conceptChange.fromConceptQname
toConceptQn = conceptChange.toConceptQname
if (conceptChange.name != u"conceptAdd" and
(fromConceptQn is None or fromConceptQn not in versReport.fromDTS.qnameConcepts)):
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(event)s fromConcept %(concept)s does not reference a concept in fromDTS"),
modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.fromConceptQname)
if (conceptChange.name != u"conceptDelete" and
(toConceptQn is None or toConceptQn not in versReport.toDTS.qnameConcepts)):
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(event)s toConcept %(concept)s does not reference a concept in toDTS"),
modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname)
if (conceptChange.name == u"conceptAdd" and toConceptQn is not None and
conceptChange.isPhysical ^
(qname(versReport.namespaceRenameTo.get(toConceptQn.namespaceURI, toConceptQn.namespaceURI),
toConceptQn.localName) not in versReport.fromDTS.qnameConcepts)):
self.modelVersReport.error(u"vercue:inconsistentPhysicalAttribute",
_(u"%(event)s toConcept %(concept)s physical attribute conflicts with presence in fromDTS"),
modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname)
if (conceptChange.name == u"conceptDelete" and toConceptQn is not None and
conceptChange.isPhysical ^
(qname(versReport.namespaceRenameFrom.get(fromConceptQn.namespaceURI, fromConceptQn.namespaceURI),
fromConceptQn.localName) in versReport.toDTS.qnameConcepts)):
self.modelVersReport.error(u"vercue:inconsistentPhysicalAttribute",
_(u"%(event)s toConcept %(concept)s physical attribute conflicts with presence in toDTS"),
modelObject=conceptChange, event=conceptChange.name, concept=conceptChange.toConceptQname)
# check concept changes of concept extended
equivalentAttributes = {}
for conceptChange in versReport.conceptDetailsChanges:
fromConcept = conceptChange.fromConcept
toConcept = conceptChange.toConcept
fromResource = conceptChange.fromResource
toResource = conceptChange.toResource
# fromConcept checks
if not conceptChange.name.endswith(u"Add"):
                    if fromConcept is None:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(action)s %(event)s fromConcept %(concept)s does not reference a concept in fromDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, concept=conceptChange.fromConceptQname)
# tuple check
elif _(u"Child") in conceptChange.name and \
not versReport.fromDTS.qnameConcepts[fromConcept.qname] \
.isTuple:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(action)s %(event)s fromConcept %(concept)s must be defined as a tuple"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, concept=conceptChange.fromConceptQname)
# resource check
elif u"Label" in conceptChange.name:
if fromResource is None:
self.modelVersReport.error(u"vercde:invalidResourceIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s does not reference a resource in fromDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue)
else:
relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.conceptLabel)
if relationship is not None:
if (relationship.qname != XbrlConst.qnLinkLabelArc or
relationship.parentQname != XbrlConst.qnLinkLabelLink or
fromResource.qname != XbrlConst.qnLinkLabel):
self.modelVersReport.error(u"vercde:invalidConceptLabelIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname)
else:
relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.elementLabel)
if relationship is not None:
if relationship.qname != XbrlConst.qnGenArc or \
fromResource.qname != XbrlConst.qnGenLabel:
self.modelVersReport.error(u"vercde:invalidConceptLabelIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname)
else:
self.modelVersReport.error(u"vercde:invalidResourceIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s does not have a label relationship to {3} in fromDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
                                        event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname)
elif u"Reference" in conceptChange.name:
if fromResource is None:
self.modelVersReport.error(u"vercde:invalidResourceIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s does not reference a resource in fromDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue)
else:
relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.conceptReference)
if relationship is not None:
if relationship.qname != XbrlConst.qnLinkReferenceArc or \
relationship.parentQname != XbrlConst.qnLinkReferenceLink or \
fromResource.qname != XbrlConst.qnLinkReference:
self.modelVersReport.error(u"vercde:invalidConceptReferenceIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname)
else:
relationship = fromConcept.relationshipToResource(fromResource, XbrlConst.elementReference)
if relationship is not None:
if relationship.qname != XbrlConst.qnGenArc or \
fromResource.qname != XbrlConst.qnGenReference:
self.modelVersReport.error(u"vercde:invalidConceptReferenceIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s for %(concept)s in fromDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname)
else:
self.modelVersReport.error(u"vercde:invalidResourceIdentifier",
_(u"%(action)s %(event)s fromResource %(resource)s does not have a reference relationship to %(concept)s in fromDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.fromResourceValue, concept=conceptChange.fromConceptQname)
# toConcept checks
if not conceptChange.name.endswith(u"Delete"):
                    if toConcept is None:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(action)s %(event)s toConcept %(concept)s does not reference a concept in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, concept=conceptChange.toConceptQname)
# tuple check
elif u"Child" in conceptChange.name and \
not versReport.toDTS.qnameConcepts[toConcept.qname] \
.isTuple:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(action)s %(event)s toConcept %(concept)s must be defined as a tuple"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, concept=conceptChange.toConceptQname)
# resource check
elif u"Label" in conceptChange.name:
if toResource is None:
self.modelVersReport.error(u"vercde:invalidResourceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s for %(concept)s does not reference a resource in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
elif toResource.qname not in (XbrlConst.qnLinkLabel, XbrlConst.qnGenLabel):
self.modelVersReport.error(u"vercde:invalidConceptLabelIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s is not a label in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
else:
relationship = toConcept.relationshipToResource(toResource, XbrlConst.conceptLabel)
if relationship is not None:
if relationship.qname != XbrlConst.qnLinkLabelArc or \
relationship.parentQname != XbrlConst.qnLinkLabelLink or \
toResource.qname != XbrlConst.qnLinkLabel:
self.modelVersReport.error(u"vercde:invalidConceptLabelIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
else:
relationship = toConcept.relationshipToResource(toResource, XbrlConst.elementLabel)
if relationship is not None:
if relationship.qname != XbrlConst.qnGenArc or \
toResource.qname != XbrlConst.qnGenLabel:
self.modelVersReport.error(u"vercde:invalidConceptLabelIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
else:
self.modelVersReport.error(u"vercde:invalidConceptResourceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s does not have a label relationship to %(concept)s in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
elif u"Reference" in conceptChange.name:
if toResource is None:
self.modelVersReport.error(u"vercde:invalidResourceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s does not reference a resource in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue)
elif toResource.qname not in (XbrlConst.qnLinkReference, XbrlConst.qnGenReference):
self.modelVersReport.error(u"vercde:invalidConceptReferenceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s is not a reference in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
else:
relationship = toConcept.relationshipToResource(toResource, XbrlConst.conceptReference)
if relationship is not None:
if relationship.qname != XbrlConst.qnLinkReferenceArc or \
relationship.parentQname != XbrlConst.qnLinkReferenceLink or \
toResource.qname != XbrlConst.qnLinkReference:
self.modelVersReport.error(u"vercde:invalidConceptReferenceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
else:
relationship = toConcept.relationshipToResource(toResource, XbrlConst.elementReference)
if relationship is not None:
if relationship.qname != XbrlConst.qnGenArc or \
toResource.qname != XbrlConst.qnGenReference:
self.modelVersReport.error(u"vercde:invalidConceptReferenceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s for %(concept)s in toDTS does not have expected link, arc, or label elements"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
else:
self.modelVersReport.error(u"vercde:invalidConceptResourceIdentifier",
_(u"%(action)s %(event)s toResource %(resource)s does not have a reference relationship to %(concept)s in toDTS"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, resource=conceptChange.toResourceValue, concept=conceptChange.toConceptQname)
# check concept correspondence
if fromConcept is not None and toConcept is not None:
if (versReport.toDTSqname(fromConcept.qname) != toConcept.qname and
versReport.equivalentConcepts.get(fromConcept.qname) != toConcept.qname and
toConcept.qname not in versReport.relatedConcepts.get(fromConcept.qname,[])):
self.modelVersReport.error(u"vercde:invalidConceptCorrespondence",
_(u"%(action)s %(event)s fromConcept %(conceptFrom)s and toConcept %(conceptTo)s must be equivalent or related"),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, conceptFrom=conceptChange.fromConceptQname, conceptTo=conceptChange.toConceptQname)
# custom attribute events
if conceptChange.name.startswith(u"conceptAttribute") or conceptChange.name == u"attributeDefinitionChange":
try:
for attr in conceptAttributeEventAttributes[conceptChange.name]:
customAttributeQname = conceptChange.customAttributeQname(attr)
if not customAttributeQname:
self.modelVersReport.info(u"arelle:invalidAttributeChange",
_(u"%(action)s %(event)s %(attr)s $(attrName)s does not have a name"),
modelObject=conceptChange, action=conceptChange.actionId,
attr=attr, attrName=customAttributeQname)
elif customAttributeQname.namespaceURI in (None, XbrlConst.xbrli, XbrlConst.xsd):
self.modelVersReport.error(u"vercde:illegalCustomAttributeEvent",
_(u"%(action)s %(event)s %(attr)s $(attrName)s has an invalid namespace"),
modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name,
attr=attr, attrName=customAttributeQname)
except KeyError:
self.modelVersReport.info(u"arelle:eventNotRecognized",
_(u"%(action)s %(event)s event is not recognized"),
modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name)
if conceptChange.name == u"attributeDefinitionChange":
fromAttr = conceptChange.customAttributeQname(u"fromCustomAttribute")
toAttr = conceptChange.customAttributeQname(u"toCustomAttribute")
equivalentAttributes[fromAttr] = toAttr
equivalentAttributes[toAttr] = fromAttr
# check item concept identifiers
if conceptChange.name in (u"conceptPeriodTypeChange", u"conceptPeriodTypeChange"):
for concept in (fromConcept, toConcept):
if concept is not None and not concept.isItem:
self.modelVersReport.error(u"vercde:invalidItemConceptIdentifier",
_(u"%(action)s %(event)s concept %(concept)s does not reference an item concept."),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, concept=concept.qname)
# check tuple concept identifiers
if conceptChange.name in (u"tupleContentModelChange", ):
for concept in (fromConcept, toConcept):
                        if concept is not None and not concept.isTuple:
self.modelVersReport.error(u"vercde:invalidTupleConceptIdentifier",
_(u"%(action)s %(event)s concept %(concept)s does not reference a tuple concept."),
modelObject=conceptChange, action=conceptChange.actionId,
event=conceptChange.name, concept=concept.qname)
if conceptChange.name in schemaAttributeEventAttributes:
attr = schemaAttributeEventAttributes[conceptChange.name]
if (fromConcept is not None and not fromConcept.get(attr) and
toConcept is not None and not toConcept.get(attr)):
self.modelVersReport.error(u"vercde:illegalSchemaAttributeChangeEvent",
_(u"%(action)s %(event)s neither concepts have a %(attribute)s attribute: %(fromConcept)s, %(toConcept)s."),
modelObject=conceptChange, action=conceptChange.actionId, attribute=attr,
event=conceptChange.name, fromConcept=fromConcept.qname, toConcept=toConcept.qname)
# check concept changes for equivalent attributes
for conceptChange in versReport.conceptDetailsChanges:
if conceptChange.name == u"conceptAttributeChange":
fromAttr = conceptChange.customAttributeQname(u"fromCustomAttribute")
toAttr = conceptChange.customAttributeQname(u"toCustomAttribute")
if (equivalentAttributes.get(fromAttr) != toAttr and
(fromAttr.localName != toAttr.localName or
(fromAttr.namespaceURI != toAttr.namespaceURI and
versReport.namespaceRenameFrom.get(fromAttr.namespaceURI, fromAttr.namespaceURI) != toAttr.namespaceURI))):
self.modelVersReport.error(u"vercde:invalidAttributeCorrespondence",
_(u"%(action)s %(event)s has non-equivalent attributes %(fromQname)s and %(toQname)s"),
modelObject=conceptChange, action=conceptChange.actionId, event=conceptChange.name,
fromQname=fromAttr, toQname=toAttr)
del equivalentAttributes # dereference
# check relationship set changes
for relSetChange in versReport.relationshipSetChanges:
for relationshipSet, name in ((relSetChange.fromRelationshipSet, u"fromRelationshipSet"),
(relSetChange.toRelationshipSet, u"toRelationshipSet")):
if relationshipSet is not None:
dts = relationshipSet.dts
relationshipSetValid = True
if relationshipSet.link:
if (relationshipSet.link not in dts.qnameConcepts or
(dts.qnameConcepts[relationshipSet.link].type is not None and
not dts.qnameConcepts[relationshipSet.link].type.isDerivedFrom(XbrlConst.qnXlExtendedType))):
self.modelVersReport.error(u"verrelse:invalidLinkElementReferenceEvent",
_(u"%(event)s %(relSet)s link %(link)s does not reference an element in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
link=relationshipSet.link)
relationshipSetValid = False
if relationshipSet.arc:
if (relationshipSet.arc not in dts.qnameConcepts or
(dts.qnameConcepts[relationshipSet.arc].type is not None and
not dts.qnameConcepts[relationshipSet.arc].type.isDerivedFrom(XbrlConst.qnXlArcType))):
self.modelVersReport.error(u"verrelse:invalidArcElementReferenceEvent",
_(u"%(event)s %(relSet)s arc %(arc) does not reference an element in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
arc=relationshipSet.arc)
relationshipSetValid = False
if relationshipSet.linkrole:
if not (XbrlConst.isStandardRole(relationshipSet.linkrole) or
relationshipSet.linkrole in relationshipSet.dts.roleTypes):
self.modelVersReport.error(u"verrelse:invalidLinkrole",
_(u"%(event)s %(relSet)s linkrole %(linkrole)s does not reference an linkrole in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
linkrole=relationshipSet.linkrole)
relationshipSetValid = False
elif not any(linkrole == relationshipSet.linkrole
for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()):
self.modelVersReport.error(u"verrelse:invalidLinkrole",
_(u"%(event)s %(relSet)s linkrole %(linkrole)s is not used in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
linkrole=relationshipSet.linkrole)
relationshipSetValid = False
if relationshipSet.arcrole:
if not (XbrlConst.isStandardArcrole(relationshipSet.arcrole) or
relationshipSet.arcrole in relationshipSet.dts.arcroleTypes):
self.modelVersReport.error(u"verrelse:invalidArcrole",
_(u"%(event)s %(relSet)s arcrole %(arcrole)s does not reference an arcrole in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
arcrole=relationshipSet.arcrole)
relationshipSetValid = False
elif not any(arcrole == relationshipSet.arcrole
for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()):
self.modelVersReport.error(u"verrelse:invalidArcrole",
_(u"%(event)s %(relSet)s arcrole %(arcrole)s is not used in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
arcrole=relationshipSet.arcrole)
relationshipSetValid = False
for relationship in relationshipSet.relationships:
# fromConcept checks
if relationship.fromConcept is None:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(event)s %(relSet)s relationship fromConcept %(conceptFrom)s does not reference a concept in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
conceptFrom=relationship.fromName)
relationshipSetValid = False
if relationship.toName and relationship.toConcept is None:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(event)s %(relSet)s relationship toConcept %(conceptTo)s does not reference a concept in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
conceptTo=relationship.toName)
relationshipSetValid = False
if relationshipSetValid: # test that relations exist
if relationship.fromRelationship is None:
if relationship.toName:
self.modelVersReport.error(u"verrelse:invalidRelationshipReference",
_(u"%(event)s %(relSet)s no relationship found from fromConcept %(conceptFrom)s to toConcept %(conceptTo)s in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
conceptFrom=relationship.fromName, conceptTo=relationship.toName)
else:
self.modelVersReport.error(u"verrelse:invalidRelationshipReference",
_(u"%(event)s %(relSet)s no relationship found fromConcept %(conceptFrom)s in its DTS"),
modelObject=relSetChange, event=relSetChange.name, relSet=name,
conceptFrom=relationship.fromName)
# check instance aspect changes
for iaChange in versReport.instanceAspectChanges:
for instAspects in (iaChange.fromAspects, iaChange.toAspects):
if instAspects is not None and instAspects.aspects:
dimAspectElts = {}
for aspect in instAspects.aspects:
dts = aspect.modelAspects.dts
if (aspect.localName in (u"explicitDimension", u"typedDimension") and aspect.concept is None):
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(event)s dimension %(dimension)s is not a concept in its DTS"),
modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName)
elif aspect.localName == u"explicitDimension":
dimConcept = aspect.concept
if not dimConcept.isExplicitDimension:
self.modelVersReport.error(u"verdime:invalidExplicitDimensionIdentifier",
_(u"%(event)s dimension %(dimension)s is not an explicit dimension in its DTS"),
modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName)
if dimConcept in dimAspectElts:
self.modelVersReport.error(u"verdime:duplicateExplicitDimensionAspect",
_(u"%(event)s dimension %(dimension)s is duplicated in a single explicitDimension element"),
modelObject=(aspect, dimAspectElts[dimConcept]), event=iaChange.name, dimension=aspect.conceptName)
else:
dimAspectElts[dimConcept] = aspect
elif aspect.localName == u"typedDimension":
dimConcept = aspect.concept
if not dimConcept.isTypedDimension:
self.modelVersReport.error(u"verdime:invalidTypedDimensionIdentifier",
_(u"%(event)s dimension %(dimension)s is not a typed dimension in its DTS"),
modelObject=aspect, event=iaChange.name, dimension=aspect.conceptName)
if dimConcept in dimAspectElts:
self.modelVersReport.error(u"verdime:duplicateTypedDimensionAspect",
_(u"%(event)s dimension %(dimension)s is duplicated in a single explicitDimension element"),
modelObject=(aspect, dimAspectElts[dimConcept]), event=iaChange.name, dimension=aspect.conceptName)
else:
dimAspectElts[dimConcept] = aspect
if aspect.localName in (u"explicitDimension", u"concepts"):
for relatedConcept in aspect.relatedConcepts:
conceptMdlObj = relatedConcept.concept
if conceptMdlObj is None or not conceptMdlObj.isItem:
self.modelVersReport.error(u"vercue:invalidConceptReference",
_(u"%(event)s concept %(concept)s is not an item in its DTS"),
modelObject=aspect, event=iaChange.name, concept=relatedConcept.conceptName)
if relatedConcept.arcrole is not None:
if (not XbrlConst.isStandardArcrole(relatedConcept.arcrole) and
relatedConcept.arcrole not in dts.arcroleTypes):
self.modelVersReport.error(u"verdime:invalidURI",
_(u"%(event)s arcrole %(arcrole)s is not defined in its DTS"),
modelObject=aspect, event=iaChange.name, arcrole=relatedConcept.arcrole)
elif not any(arcrole == relatedConcept.arcrole
for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()):
self.modelVersReport.error(u"verdime:invalidURI",
_(u"%(event)s arcrole %(arcrole)s is not used in its DTS"),
                                        modelObject=aspect, event=iaChange.name, arcrole=relatedConcept.arcrole)
if relatedConcept.linkrole is not None:
if (relatedConcept.linkrole != u"http://www.xbrl.org/2003/role/link" and
relatedConcept.linkrole not in dts.roleTypes):
self.modelVersReport.error(u"verdime:invalidURI",
_(u"%(event)s linkrole %(linkrole)s is not defined in its DTS"),
modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.linkrole)
elif not any(linkrole == relatedConcept.linkrole
for arcrole, linkrole, linkqname, arcqname in dts.baseSets.keys()):
self.modelVersReport.error(u"verdime:invalidURI",
_(u"%(event)s linkrole %(linkrole)s is not used in its DTS"),
modelObject=aspect, event=iaChange.name, linkrole=relatedConcept.linkrole)
if (relatedConcept.arc is not None and
(relatedConcept.arc not in dts.qnameConcepts or
(dts.qnameConcepts[relatedConcept.arc].type is not None and
not dts.qnameConcepts[relatedConcept.arc].type.isDerivedFrom(XbrlConst.qnXlArcType)))):
self.modelVersReport.error(u"verdime:invalidArcElement",
_(u"%(event)s arc %(arc)s is not defined as an arc in its DTS"),
modelObject=aspect, event=iaChange.name, arc=relatedConcept.arc)
if (relatedConcept.link is not None and
(relatedConcept.link not in dts.qnameConcepts or
(dts.qnameConcepts[relatedConcept.link].type is not None and
not dts.qnameConcepts[relatedConcept.link].type.isDerivedFrom(XbrlConst.qnXlExtendedType)))):
self.modelVersReport.error(u"verdime:invalidLinkElement",
_(u"%(event)s link %(link)s is not defined in its DTS"),
modelObject=aspect, event=iaChange.name, link=relatedConcept.link)
self.close() | apache-2.0 | 4,493,540,456,087,069,000 | 80.107407 | 177 | 0.541276 | false |
weinbe58/QuSpin | tests/higher_spin_test.py | 1 | 2498 | import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.operators import hamiltonian
from quspin.basis import spin_basis_1d
import numpy as np
from itertools import product
try:
from functools import reduce
except ImportError:
pass
dtypes = [(np.float32,np.complex64),(np.float64,np.complex128)]
spin_ops={}
spins=['1/2','1','3/2','2']
spin_ops['1/2']={}
spin_ops['1/2']["I"]=np.array([[1,0],[0,1]]) + 0.0j
spin_ops['1/2']['x']=(1.0/2.0)*np.array([[0,1],[1,0]]) + 0.0j
spin_ops['1/2']['y']=(1.0j/2.0)*np.array([[0,-1],[1,0]]) + 0.0j
spin_ops['1/2']['z']=(1.0/2.0)*np.array([[1,0.0],[0.0,-1]]) + 0.0j
spin_ops['1']={}
spin_ops['1']['I']=np.array([[1,0,0],[0,1,0],[0,0,1]]) + 0.0j
spin_ops['1']['x']=(1.0/np.sqrt(2))*np.array([[0,1,0],[1,0,1],[0,1,0]]) + 0.0j
spin_ops['1']['y']=(1.0j/np.sqrt(2))*np.array([[0,-1,0],[1,0,-1],[0,1,0]]) +0.0j
spin_ops['1']['z']=np.array([[1,0,0],[0,0,0],[0,0,-1]]) + 0.0j
spin_ops['3/2']={}
spin_ops['3/2']['I']=np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
spin_ops['3/2']['x']=(1.0/2.0)*np.array([[0,np.sqrt(3),0,0],[np.sqrt(3),0,2,0],[0,2,0,np.sqrt(3)],[0,0,np.sqrt(3),0]]) + 0j
spin_ops['3/2']['y']=(1.0j/2.0)*np.array([[0,-np.sqrt(3),0,0],[np.sqrt(3),0,-2,0],[0,2,0,-np.sqrt(3)],[0,0,np.sqrt(3),0]])
spin_ops['3/2']['z']=(1.0/2.0)*np.array([[3,0,0,0],[0,1,0,0],[0,0,-1,0],[0,0,0,-3]])+ 0.0j
spin_ops['2']={}
spin_ops['2']['I']=np.array([[1,0,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]])
spin_ops['2']['x']=(1.0/2.0)*np.array([[0,2.0,0,0,0],[2,0,np.sqrt(6),0,0],[0,np.sqrt(6),0,np.sqrt(6),0],[0,0,np.sqrt(6),0,2],[0,0,0,2,0]])
spin_ops['2']['y']=(1.0j/2.0)*np.array([[0,-2.0,0,0,0],[2,0,-np.sqrt(6),0,0],[0,np.sqrt(6),0,-np.sqrt(6),0],[0,0,np.sqrt(6),0,-2],[0,0,0,2,0]])
spin_ops['2']['z']=np.array([[2,0,0,0,0],[0,1,0,0,0],[0,0,0,0,0],[0,0,0,-1,0],[0,0,0,0,-2]])+ 0.0j
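# Sanity check added for illustration (not part of the original test): every
# operator table defined above should satisfy the su(2) commutation relation
# [S_x, S_y] = i S_z, so a cheap assertion over all spin values catches typos
# in the hand-written matrices.
for _S, _ops in spin_ops.items():
    _comm = _ops['x'].dot(_ops['y']) - _ops['y'].dot(_ops['x'])
    np.testing.assert_allclose(_comm, 1.0j * _ops['z'], atol=1e-12,
        err_msg="[Sx,Sy] != i*Sz for S={}".format(_S))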
L_max = 4
for S in spins:
for L in range(1,L_max+1):
basis = spin_basis_1d(L,S=S,pauli=False)
J = [1.0]
J.extend(range(L))
for p in product(*[spin_ops[S].items() for i in range(L)]):
opstr,ops = zip(*list(p))
opstr = "".join(opstr)
static = [[opstr,[J]]]
static,_ = basis.expanded_form(static,[])
quspin_op = hamiltonian(static,[],basis=basis,check_symm=False,check_herm=False)
op = reduce(np.kron,ops)
np.testing.assert_allclose(quspin_op.toarray(),op,atol=1e-14,err_msg="failed test for S={} operator {}".format(S,opstr))
print("spin-{} operators comparisons passed!".format(S))
| bsd-3-clause | -688,366,178,981,138,300 | 36.848485 | 143 | 0.552842 | false |
MehnaazAsad/ECO_Globular_Clusters | src/data/mods_prelim_checks_2/Exp_fil2.py | 1 | 6614 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 17:36:35 2017
@author: asadm2
"""
### DESCRIPTION
#This script carries out an exposure time check and an "at least 2 filters"
#check
import pandas as pd
import numpy as np
def expfil2(objname,revtxt):
"""
This function carries out an exposure time check and also checks if
the images that passed the first check are taken using at least 2 filters
Args:
objname: ECOID of the galaxy
revtxt: objname_rev.txt file that Obj_in_Img.py returns
Returns:
goodObj.txt and badObj.txt files depending on which ECOIDs passed
both checks and which ones didn't
"""
path_to_raw = '/fs1/masad/Research/Repositories/ECO_Globular_Clusters/'\
'data/raw/'
path_to_interim = '/fs1/masad/Research/Repositories/ECO_Globular_Clusters/'\
'data/interim/'
ECO = path_to_raw + 'Available_HST_Data_ECO.txt'
ECO = pd.read_csv(ECO, delimiter='\s+', header=None, \
names=['ECOID', 'HSTOBJ', 'RA', 'DEC', 'exptime', \
'camera', 'filename'])
ECO['exptime'] = pd.to_numeric(ECO['exptime'],errors='coerce')
ECO['filename'] = ECO['filename'].astype('str')
files_arr = ECO['filename'].values
n_files = len(files_arr)
wfc3_ir = ['f110w','f125w','f160w']
wfc3_uvis = ['f606w','f600lp']
filters = [[] for x in range(n_files)]
for i in range(len(ECO['filename'])):
str_split = ECO['filename'][i].split(';')[1].split('_')
filter_i = str_split[3]+'_'+str_split[4]+'_'+str_split[5]
if 'ACS' in filter_i: #acs_wfc
filter_i = filter_i.lower()
elif 'd634' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'm51' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'tile' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'c_v' in filter_i: #acs-wfc fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
if 'acs-wfc' in filter_i:
str_split = filter_i.split('-')
filter_i = str_split[0]+'_'+str_split[1]
elif 'ngc' in filter_i: #acs fixed to acs_wfc
str_split = filter_i.split('_')
filter_i = str_split[0]+'_wfc_'+str_split[2]
elif '131009' in filter_i: #wfc3
str_split = filter_i.split('_')
if str_split[2] == 'f438w':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif str_split[2] == 'f775w':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif 'par' in filter_i:#and any(str in filter_i for str in wfc3_ir):
str_split = filter_i.split('_')
if str_split[2] == 'f606w':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif str_split[2] == 'f125w':
filter_i = str_split[0]+'_ir_'+str_split[2]
elif str_split[2] == 'f160w':
filter_i = str_split[0]+'_ir_'+str_split[2]
elif str_split[2] == 'f110w':
filter_i = str_split[0]+'_ir_'+str_split[2]
elif str_split[2] == 'f600lp':
filter_i = str_split[0]+'_uvis_'+str_split[2]
elif 'w_wf' in filter_i: #wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[1]
elif 'lp_wf' in filter_i: #wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[1]
elif 'n4496' in filter_i: #all wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
elif 'n5194' in filter_i: #all wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
elif 'u6614' in filter_i: #all wfpc2
str_split = filter_i.split('_')
filter_i = str_split[0]+'_'+str_split[2]
filters[i] = filter_i
filters = np.asarray(filters)
filters_unique = np.unique(filters)
#Adding filter array to DataFrame
ECO.loc[:, 'filters'] = filters
### Exposure time check
exptime_arr = [9399, 3671, 3331, 1319, 2055, 2236, 1758, 10337, 2045, 1237,
2290, 3853, 1928101311, 73024829, 275363, 1241, 31705,
26575,6021, 3548, 3723, 2053, 2249, 3368, 5275, 4069,
171413, 31062, 11431, 5789, 8520, 10071, 6677, 24445, 12605,
10757, 50294]
exp_fil_dict = dict(zip(filters_unique, exptime_arr ))
contents = pd.read_csv(revtxt,header=None,names=['filename'])
contents.filename = 'http://hla.stsci.edu/cgi-bin/' + contents.filename\
.astype(str)
#Match and return all columns associated with this ECOID and filename
#from ECO catalog
ECO2 = ECO.loc[(ECO.filename.isin(contents.filename)) & \
(ECO.ECOID==objname),: ]
ECOID_groups = ECO2.groupby('filters')
ECO_keys = ECOID_groups.groups.keys()
ECO_match3 = []
for key in ECO_keys:
if ECOID_groups.get_group(key).exptime.sum() >= exp_fil_dict[key]:
ECO_match3.append(key) #"good" keys
ECO_match3 = np.array(ECO_match3)
### At least 2 filter check
if len(ECO_match3) >= 2:
result = True
with open(path_to_interim + 'goodObj.txt', 'a') as newfile:
newfile.write(np.unique(ECO2.ECOID)[0]+'\n')
else:
result = False
with open(path_to_interim + 'badObj.txt', 'a') as newfile:
newfile.write(np.unique(ECO2.ECOID)[0]+'\n')
return result
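# Illustrative usage sketch (not part of the original module). The ECOID and the
# rev-file path below are hypothetical placeholders, and expfil2() also expects
# the Available_HST_Data_ECO.txt catalog to exist under path_to_raw.
if __name__ == '__main__':
    passed = expfil2('ECO00001', '/path/to/ECO00001_rev.txt')
    print('Passed both checks' if passed else 'Failed at least one check')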
| mit | -8,252,784,929,073,507,000 | 37.453488 | 80 | 0.51648 | false |
beiko-lab/gengis | bin/Lib/site-packages/numpy/distutils/tests/test_fcompiler_intel.py | 1 | 1161 | from numpy.testing import *
import numpy.distutils.fcompiler
intel_32bit_version_strings = [
("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"\
"running on Intel(R) 32, Version 11.1", '11.1'),
]
intel_64bit_version_strings = [
("Intel(R) Fortran IA-64 Compiler Professional for applications"\
"running on IA-64, Version 11.0", '11.0'),
("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"\
"running on Intel(R) 64, Version 11.1", '11.1')
]
class TestIntelFCompilerVersions(TestCase):
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
class TestIntelEM64TFCompilerVersions(TestCase):
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
if __name__ == '__main__':
run_module_suite()
| gpl-3.0 | 4,528,753,986,483,240,000 | 32.147059 | 79 | 0.641688 | false |
docusign/docusign-python-client | docusign_esign/models/payment_details.py | 1 | 17416 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allowed_payment_methods': 'list[str]',
'charge_id': 'str',
'currency_code': 'str',
'currency_code_metadata': 'PropertyMetadata',
'customer_id': 'str',
'custom_metadata': 'str',
'custom_metadata_required': 'bool',
'gateway_account_id': 'str',
'gateway_account_id_metadata': 'PropertyMetadata',
'gateway_display_name': 'str',
'gateway_name': 'str',
'line_items': 'list[PaymentLineItem]',
'payment_option': 'str',
'payment_source_id': 'str',
'signer_values': 'PaymentSignerValues',
'status': 'str',
'total': 'Money'
}
attribute_map = {
'allowed_payment_methods': 'allowedPaymentMethods',
'charge_id': 'chargeId',
'currency_code': 'currencyCode',
'currency_code_metadata': 'currencyCodeMetadata',
'customer_id': 'customerId',
'custom_metadata': 'customMetadata',
'custom_metadata_required': 'customMetadataRequired',
'gateway_account_id': 'gatewayAccountId',
'gateway_account_id_metadata': 'gatewayAccountIdMetadata',
'gateway_display_name': 'gatewayDisplayName',
'gateway_name': 'gatewayName',
'line_items': 'lineItems',
'payment_option': 'paymentOption',
'payment_source_id': 'paymentSourceId',
'signer_values': 'signerValues',
'status': 'status',
'total': 'total'
}
def __init__(self, allowed_payment_methods=None, charge_id=None, currency_code=None, currency_code_metadata=None, customer_id=None, custom_metadata=None, custom_metadata_required=None, gateway_account_id=None, gateway_account_id_metadata=None, gateway_display_name=None, gateway_name=None, line_items=None, payment_option=None, payment_source_id=None, signer_values=None, status=None, total=None): # noqa: E501
"""PaymentDetails - a model defined in Swagger""" # noqa: E501
self._allowed_payment_methods = None
self._charge_id = None
self._currency_code = None
self._currency_code_metadata = None
self._customer_id = None
self._custom_metadata = None
self._custom_metadata_required = None
self._gateway_account_id = None
self._gateway_account_id_metadata = None
self._gateway_display_name = None
self._gateway_name = None
self._line_items = None
self._payment_option = None
self._payment_source_id = None
self._signer_values = None
self._status = None
self._total = None
self.discriminator = None
if allowed_payment_methods is not None:
self.allowed_payment_methods = allowed_payment_methods
if charge_id is not None:
self.charge_id = charge_id
if currency_code is not None:
self.currency_code = currency_code
if currency_code_metadata is not None:
self.currency_code_metadata = currency_code_metadata
if customer_id is not None:
self.customer_id = customer_id
if custom_metadata is not None:
self.custom_metadata = custom_metadata
if custom_metadata_required is not None:
self.custom_metadata_required = custom_metadata_required
if gateway_account_id is not None:
self.gateway_account_id = gateway_account_id
if gateway_account_id_metadata is not None:
self.gateway_account_id_metadata = gateway_account_id_metadata
if gateway_display_name is not None:
self.gateway_display_name = gateway_display_name
if gateway_name is not None:
self.gateway_name = gateway_name
if line_items is not None:
self.line_items = line_items
if payment_option is not None:
self.payment_option = payment_option
if payment_source_id is not None:
self.payment_source_id = payment_source_id
if signer_values is not None:
self.signer_values = signer_values
if status is not None:
self.status = status
if total is not None:
self.total = total
@property
def allowed_payment_methods(self):
"""Gets the allowed_payment_methods of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The allowed_payment_methods of this PaymentDetails. # noqa: E501
:rtype: list[str]
"""
return self._allowed_payment_methods
@allowed_payment_methods.setter
def allowed_payment_methods(self, allowed_payment_methods):
"""Sets the allowed_payment_methods of this PaymentDetails.
# noqa: E501
:param allowed_payment_methods: The allowed_payment_methods of this PaymentDetails. # noqa: E501
:type: list[str]
"""
self._allowed_payment_methods = allowed_payment_methods
@property
def charge_id(self):
"""Gets the charge_id of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The charge_id of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._charge_id
@charge_id.setter
def charge_id(self, charge_id):
"""Sets the charge_id of this PaymentDetails.
# noqa: E501
:param charge_id: The charge_id of this PaymentDetails. # noqa: E501
:type: str
"""
self._charge_id = charge_id
@property
def currency_code(self):
"""Gets the currency_code of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The currency_code of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this PaymentDetails.
# noqa: E501
:param currency_code: The currency_code of this PaymentDetails. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def currency_code_metadata(self):
"""Gets the currency_code_metadata of this PaymentDetails. # noqa: E501
:return: The currency_code_metadata of this PaymentDetails. # noqa: E501
:rtype: PropertyMetadata
"""
return self._currency_code_metadata
@currency_code_metadata.setter
def currency_code_metadata(self, currency_code_metadata):
"""Sets the currency_code_metadata of this PaymentDetails.
:param currency_code_metadata: The currency_code_metadata of this PaymentDetails. # noqa: E501
:type: PropertyMetadata
"""
self._currency_code_metadata = currency_code_metadata
@property
def customer_id(self):
"""Gets the customer_id of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The customer_id of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this PaymentDetails.
# noqa: E501
:param customer_id: The customer_id of this PaymentDetails. # noqa: E501
:type: str
"""
self._customer_id = customer_id
@property
def custom_metadata(self):
"""Gets the custom_metadata of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The custom_metadata of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._custom_metadata
@custom_metadata.setter
def custom_metadata(self, custom_metadata):
"""Sets the custom_metadata of this PaymentDetails.
# noqa: E501
:param custom_metadata: The custom_metadata of this PaymentDetails. # noqa: E501
:type: str
"""
self._custom_metadata = custom_metadata
@property
def custom_metadata_required(self):
"""Gets the custom_metadata_required of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The custom_metadata_required of this PaymentDetails. # noqa: E501
:rtype: bool
"""
return self._custom_metadata_required
@custom_metadata_required.setter
def custom_metadata_required(self, custom_metadata_required):
"""Sets the custom_metadata_required of this PaymentDetails.
# noqa: E501
:param custom_metadata_required: The custom_metadata_required of this PaymentDetails. # noqa: E501
:type: bool
"""
self._custom_metadata_required = custom_metadata_required
@property
def gateway_account_id(self):
"""Gets the gateway_account_id of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The gateway_account_id of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._gateway_account_id
@gateway_account_id.setter
def gateway_account_id(self, gateway_account_id):
"""Sets the gateway_account_id of this PaymentDetails.
# noqa: E501
:param gateway_account_id: The gateway_account_id of this PaymentDetails. # noqa: E501
:type: str
"""
self._gateway_account_id = gateway_account_id
@property
def gateway_account_id_metadata(self):
"""Gets the gateway_account_id_metadata of this PaymentDetails. # noqa: E501
:return: The gateway_account_id_metadata of this PaymentDetails. # noqa: E501
:rtype: PropertyMetadata
"""
return self._gateway_account_id_metadata
@gateway_account_id_metadata.setter
def gateway_account_id_metadata(self, gateway_account_id_metadata):
"""Sets the gateway_account_id_metadata of this PaymentDetails.
:param gateway_account_id_metadata: The gateway_account_id_metadata of this PaymentDetails. # noqa: E501
:type: PropertyMetadata
"""
self._gateway_account_id_metadata = gateway_account_id_metadata
@property
def gateway_display_name(self):
"""Gets the gateway_display_name of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The gateway_display_name of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._gateway_display_name
@gateway_display_name.setter
def gateway_display_name(self, gateway_display_name):
"""Sets the gateway_display_name of this PaymentDetails.
# noqa: E501
:param gateway_display_name: The gateway_display_name of this PaymentDetails. # noqa: E501
:type: str
"""
self._gateway_display_name = gateway_display_name
@property
def gateway_name(self):
"""Gets the gateway_name of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The gateway_name of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._gateway_name
@gateway_name.setter
def gateway_name(self, gateway_name):
"""Sets the gateway_name of this PaymentDetails.
# noqa: E501
:param gateway_name: The gateway_name of this PaymentDetails. # noqa: E501
:type: str
"""
self._gateway_name = gateway_name
@property
def line_items(self):
"""Gets the line_items of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The line_items of this PaymentDetails. # noqa: E501
:rtype: list[PaymentLineItem]
"""
return self._line_items
@line_items.setter
def line_items(self, line_items):
"""Sets the line_items of this PaymentDetails.
# noqa: E501
:param line_items: The line_items of this PaymentDetails. # noqa: E501
:type: list[PaymentLineItem]
"""
self._line_items = line_items
@property
def payment_option(self):
"""Gets the payment_option of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The payment_option of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._payment_option
@payment_option.setter
def payment_option(self, payment_option):
"""Sets the payment_option of this PaymentDetails.
# noqa: E501
:param payment_option: The payment_option of this PaymentDetails. # noqa: E501
:type: str
"""
self._payment_option = payment_option
@property
def payment_source_id(self):
"""Gets the payment_source_id of this PaymentDetails. # noqa: E501
# noqa: E501
:return: The payment_source_id of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._payment_source_id
@payment_source_id.setter
def payment_source_id(self, payment_source_id):
"""Sets the payment_source_id of this PaymentDetails.
# noqa: E501
:param payment_source_id: The payment_source_id of this PaymentDetails. # noqa: E501
:type: str
"""
self._payment_source_id = payment_source_id
@property
def signer_values(self):
"""Gets the signer_values of this PaymentDetails. # noqa: E501
:return: The signer_values of this PaymentDetails. # noqa: E501
:rtype: PaymentSignerValues
"""
return self._signer_values
@signer_values.setter
def signer_values(self, signer_values):
"""Sets the signer_values of this PaymentDetails.
:param signer_values: The signer_values of this PaymentDetails. # noqa: E501
:type: PaymentSignerValues
"""
self._signer_values = signer_values
@property
def status(self):
"""Gets the status of this PaymentDetails. # noqa: E501
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:return: The status of this PaymentDetails. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this PaymentDetails.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:param status: The status of this PaymentDetails. # noqa: E501
:type: str
"""
self._status = status
@property
def total(self):
"""Gets the total of this PaymentDetails. # noqa: E501
:return: The total of this PaymentDetails. # noqa: E501
:rtype: Money
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this PaymentDetails.
:param total: The total of this PaymentDetails. # noqa: E501
:type: Money
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PaymentDetails, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | 576,533,233,708,825,000 | 30.267504 | 415 | 0.605018 | false |
django-bmf/django-bmf | djangobmf/pagination.py | 2 | 3352 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.core.paginator import Paginator
from django.core.paginator import InvalidPage
from django.template import Context
from django.template import loader
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import NotFound
from rest_framework.pagination import BasePagination
from rest_framework.response import Response
from rest_framework.utils.urls import replace_query_param
from rest_framework.utils.urls import remove_query_param
class PaginationMixin(BasePagination):
template = 'rest_framework/pagination/previous_and_next.html'
invalid_page_message = _('Invalid page "{page_number}": {message}.')
page_size = None
def paginate_queryset(self, queryset, request, view=None):
self.request = request
if not self.page_size:
self.page = None
self.count = queryset.count()
return list(queryset)
paginator = Paginator(queryset, self.page_size)
page_number = request.query_params.get('page', 1)
self.count = paginator.count
try:
self.page = paginator.page(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number,
message=six.text_type(exc),
)
raise NotFound(msg)
if paginator.num_pages > 1:
# The browsable API should display pagination controls.
self.display_page_controls = True
return list(self.page)
def get_paginated_response_data(self, data):
if self.page:
return {
'paginator': {
'current': self.page.number,
'count': self.count,
'pages': self.page.paginator.num_pages,
},
'items': data,
}
else:
return {
'paginator': {
'current': 1,
'count': self.count,
'pages': 1,
},
'items': data,
}
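    # Illustrative note (not part of the original class): with page_size = 100,
    # a queryset of 250 items and ?page=2, the payload built above looks like
    #
    #     {'paginator': {'current': 2, 'count': 250, 'pages': 3}, 'items': [...]}
    #
    # and when page_size is None the whole queryset comes back as a single
    # "page" with pages == 1.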
def get_paginated_response(self, data):
return Response(self.get_paginated_response_data(data))
def get_next_link(self):
if not self.page or not self.page.has_next():
return None
url = self.request.build_absolute_uri()
page_number = self.page.next_page_number()
return replace_query_param(url, 'page', page_number)
def get_previous_link(self):
if not self.page or not self.page.has_previous():
return None
url = self.request.build_absolute_uri()
page_number = self.page.previous_page_number()
if page_number == 1:
return remove_query_param(url, 'page')
return replace_query_param(url, 'page', page_number)
def get_html_context(self):
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
class ModulePagination(PaginationMixin):
page_size = 100
| bsd-3-clause | -7,896,100,331,072,318,000 | 31.230769 | 72 | 0.598449 | false |
BoGoEngine/bogo-osx | main.py | 1 | 1638 | #!/usr/bin/env python
from Cocoa import *
from InputMethodKit import *
from itertools import takewhile
import bogo
class BogoController(IMKInputController):
def __init__(self):
# Cocoa doesn't call this method at all
self.reset()
self.initialized = True
def reset(self):
self.composing_string = ""
self.raw_string = ""
def inputText_client_(self, string, client):
if not hasattr(self, 'initialized'):
self.__init__()
if string == ' ':
self.reset()
return NO
self.raw_string += string
result = bogo.process_sequence(self.raw_string)
same_initial_chars = list(takewhile(lambda tupl: tupl[0] == tupl[1],
zip(self.composing_string,
result)))
n_backspace = len(self.composing_string) - len(same_initial_chars)
string_to_commit = result[len(same_initial_chars):]
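        # Illustrative example (not part of the original controller), assuming a
        # Telex-style rule where typing "s" adds an acute accent to the vowel:
        # if the composed string so far is u"vie" and the engine now returns
        # u"vi\xe9" (e-acute), the shared prefix is u"vi", so n_backspace == 1
        # and only the single accented character is re-sent.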
start = self.client().length() - n_backspace
length = len(string_to_commit)
self.client().insertText_replacementRange_(
string_to_commit,
NSMakeRange(start, length))
self.composing_string = result
return YES
def main():
pool = NSAutoreleasePool.alloc().init()
connectionName = "Bogo_1_Connection"
identifier = NSBundle.mainBundle().bundleIdentifier()
NSLog(NSBundle.mainBundle().bundleIdentifier())
server = IMKServer.alloc().initWithName_bundleIdentifier_(
connectionName,
"com.ngochin.inputmethod.BoGo")
# NSBundle.loadNibNamed_owner_(
# "MainMenu",
# NSApplication.sharedApplication())
NSLog("here")
NSApplication.sharedApplication().run()
pool.release()
if __name__ == "__main__":
main()
| gpl-3.0 | 5,871,501,967,262,425,000 | 21.75 | 70 | 0.663004 | false |
ibaidev/gplib | gplib/covariance_functions/white_noise.py | 1 | 4027 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Ibai Roman
#
# This file is part of GPlib.
#
# GPlib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GPlib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GPlib. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from .covariance_function import CovarianceFunction
from ..parameters import OptimizableParameter
from ..transformations import LogTransformation
class WhiteNoise(CovarianceFunction):
"""
"""
def __init__(self, data):
scale = 2.0
ov2_min = -10
ov2_max = 10
if np.random.rand() < 0.5:
ov2 = np.random.normal(
loc=np.log(np.std(data['Y'])) - np.log(10),
scale=scale
)
else:
ov2 = np.random.normal(
loc=0,
scale=scale
)
ov2 = np.clip(ov2, ov2_min, ov2_max)
hyperparams = [
OptimizableParameter(
'output_variance', LogTransformation,
default_value=np.exp(ov2),
min_value=np.exp(ov2_min), max_value=np.exp(ov2_max)
)
]
super(WhiteNoise, self).__init__(hyperparams)
def covariance(self, mat_a, mat_b=None, only_diagonal=False):
"""
Measures the distance matrix between solutions of A and B, and
applies the kernel function element-wise to the distance matrix.
:param mat_a: List of solutions in lines and dimensions in columns.
:type mat_a:
:param mat_b: List of solutions in lines and dimensions in columns.
:type mat_b:
:param only_diagonal:
:type only_diagonal:
:return: Result matrix with kernel function applied element-wise.
:rtype:
"""
len_a = len(mat_a)
if mat_b is not None:
len_b = len(mat_b)
return np.zeros((len_a, len_b))
if only_diagonal:
return np.square(self.get_param_value('output_variance')) * \
np.ones((len_a, 1))
return np.square(self.get_param_value('output_variance')) * \
np.eye(len_a)
def dk_dx(self, mat_a, mat_b=None):
"""
Measures gradient of the distance between solutions of A and B in X.
:param mat_a: List of solutions in lines and dimensions in columns.
:param mat_b: List of solutions in lines and dimensions in columns.
:return: 3D array with the gradient in every dimension of X.
"""
raise NotImplementedError("Not Implemented. This is an interface.")
def dk_dtheta(self, mat_a, mat_b=None, trans=False):
"""
Measures gradient of the distance between solutions of A and B in the
hyper-parameter space.
:param mat_a: List of solutions in lines and dimensions in columns.
:type mat_a:
:param mat_b: List of solutions in lines and dimensions in columns.
:type mat_b:
:param trans: Return results in the transformed space.
:type trans:
:return: 3D array with the gradient in every
dimension the length-scale hyper-parameter space.
:rtype:
"""
len_a = len(mat_a)
if mat_b is not None:
len_b = len(mat_b)
return np.zeros((len_a, len_b)),
dk_dov = np.eye(len_a) * \
2.0 * self.get_param_value('output_variance')
if trans:
dk_dov = self.get_hyperparam('output_variance').grad_trans(dk_dov)
return dk_dov,
| gpl-3.0 | -4,278,698,062,277,995,000 | 31.216 | 78 | 0.593991 | false |
elastacloud/libcloud | libcloud/compute/drivers/libvirt_driver.py | 1 | 10168 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import time
import platform
import subprocess
import mimetypes
from os.path import join as pjoin
from collections import defaultdict
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.compute.base import NodeDriver, Node
from libcloud.compute.base import NodeState
from libcloud.compute.types import Provider
from libcloud.utils.networking import is_public_subnet
try:
import libvirt
have_libvirt = True
except ImportError:
have_libvirt = False
class LibvirtNodeDriver(NodeDriver):
"""
Libvirt (http://libvirt.org/) node driver.
To enable debug mode, set LIBVIR_DEBUG environment variable.
"""
type = Provider.LIBVIRT
name = 'Libvirt'
website = 'http://libvirt.org/'
NODE_STATE_MAP = {
0: NodeState.TERMINATED, # no state
1: NodeState.RUNNING, # domain is running
2: NodeState.PENDING, # domain is blocked on resource
3: NodeState.TERMINATED, # domain is paused by user
4: NodeState.TERMINATED, # domain is being shut down
5: NodeState.TERMINATED, # domain is shut off
6: NodeState.UNKNOWN, # domain is crashed
7: NodeState.UNKNOWN, # domain is suspended by guest power management
}
def __init__(self, uri):
"""
:param uri: Hypervisor URI (e.g. vbox:///session, qemu:///system,
etc.).
:type uri: ``str``
"""
if not have_libvirt:
raise RuntimeError('Libvirt driver requires \'libvirt\' Python ' +
'package')
self._uri = uri
self.connection = libvirt.open(uri)
def list_nodes(self):
domains = self.connection.listAllDomains()
nodes = self._to_nodes(domains=domains)
return nodes
def reboot_node(self, node):
domain = self._get_domain_for_node(node=node)
return domain.reboot(flags=0) == 0
def destroy_node(self, node):
domain = self._get_domain_for_node(node=node)
return domain.destroy() == 0
def ex_start_node(self, node):
"""
Start a stopped node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
domain = self._get_domain_for_node(node=node)
return domain.create() == 0
def ex_shutdown_node(self, node):
"""
Shutdown a running node.
Note: Usually this will result in sending an ACPI event to the node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
domain = self._get_domain_for_node(node=node)
return domain.shutdown() == 0
def ex_suspend_node(self, node):
"""
Suspend a running node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
domain = self._get_domain_for_node(node=node)
return domain.suspend() == 0
def ex_resume_node(self, node):
"""
Resume a suspended node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
domain = self._get_domain_for_node(node=node)
return domain.resume() == 0
def ex_take_node_screenshot(self, node, directory, screen=0):
"""
Take a screenshot of a monitoring of a running instance.
:param node: Node to take the screenshot of.
:type node: :class:`libcloud.compute.base.Node`
:param directory: Path where the screenshot will be saved.
:type directory: ``str``
:param screen: ID of the monitor to take the screenshot of.
:type screen: ``int``
:return: Full path where the screenshot has been saved.
:rtype: ``str``
"""
if not os.path.exists(directory) or not os.path.isdir(directory):
raise ValueError('Invalid value for directory argument')
domain = self._get_domain_for_node(node=node)
stream = self.connection.newStream()
        mime_type = domain.screenshot(stream=stream, screen=screen)
extensions = mimetypes.guess_all_extensions(type=mime_type)
if extensions:
extension = extensions[0]
else:
extension = '.png'
name = 'screenshot-%s%s' % (int(time.time()), extension)
file_path = pjoin(directory, name)
with open(file_path, 'wb') as fp:
def write(stream, buf, opaque):
fp.write(buf)
stream.recvAll(write, None)
try:
stream.finish()
except Exception:
# Finish is not supported by all backends
pass
return file_path
def ex_get_hypervisor_hostname(self):
"""
Return a system hostname on which the hypervisor is running.
"""
hostname = self.connection.getHostname()
return hostname
def ex_get_hypervisor_sysinfo(self):
"""
Retrieve hypervisor system information.
:rtype: ``dict``
"""
xml = self.connection.getSysinfo()
etree = ET.XML(xml)
attributes = ['bios', 'system', 'processor', 'memory_device']
sysinfo = {}
for attribute in attributes:
element = etree.find(attribute)
entries = self._get_entries(element=element)
sysinfo[attribute] = entries
return sysinfo
def _to_nodes(self, domains):
nodes = [self._to_node(domain=domain) for domain in domains]
return nodes
def _to_node(self, domain):
state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
state = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)
public_ips, private_ips = [], []
ip_addresses = self._get_ip_addresses_for_domain(domain)
for ip_address in ip_addresses:
if is_public_subnet(ip_address):
public_ips.append(ip_address)
else:
private_ips.append(ip_address)
extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(),
'types': self.connection.getType(),
'used_memory': memory / 1024, 'vcpu_count': vcpu_count,
'used_cpu_time': used_cpu_time}
node = Node(id=domain.ID(), name=domain.name(), state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self, extra=extra)
node._uuid = domain.UUIDString() # we want to use a custom UUID
return node
def _get_ip_addresses_for_domain(self, domain):
"""
Retrieve IP addresses for the provided domain.
Note: This functionality is currently only supported on Linux and
only works if this code is run on the same machine as the VMs run
on.
:return: IP addresses for the provided domain.
:rtype: ``list``
"""
result = []
if platform.system() != 'Linux':
# Only Linux is supported atm
return result
mac_addresses = self._get_mac_addresses_for_domain(domain=domain)
cmd = ['arp', '-an']
child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = child.communicate()
arp_table = self._parse_arp_table(arp_output=stdout)
for mac_address in mac_addresses:
if mac_address in arp_table:
ip_addresses = arp_table[mac_address]
result.extend(ip_addresses)
return result
def _get_mac_addresses_for_domain(self, domain):
"""
Parses network interface MAC addresses from the provided domain.
"""
xml = domain.XMLDesc()
etree = ET.XML(xml)
elems = etree.findall("devices/interface[@type='network']/mac")
result = []
for elem in elems:
mac_address = elem.get('address')
result.append(mac_address)
return result
def _get_domain_for_node(self, node):
"""
Return libvirt domain object for the provided node.
"""
domain = self.connection.lookupByUUIDString(node.uuid)
return domain
def _get_entries(self, element):
"""
Parse entries dictionary.
:rtype: ``dict``
"""
elements = element.findall('entry')
result = {}
for element in elements:
name = element.get('name')
value = element.text
result[name] = value
return result
def _parse_arp_table(self, arp_output):
"""
Parse arp command output and return a dictionary which maps mac address
to an IP address.
:return: Dictionary which maps mac address to IP address.
:rtype: ``dict``
"""
lines = arp_output.split('\n')
arp_table = defaultdict(list)
for line in lines:
            match = re.match(r'.*?\((.*?)\) at (.*?)\s+', line)
if not match:
continue
groups = match.groups()
ip_address = groups[0]
mac_address = groups[1]
arp_table[mac_address].append(ip_address)
return arp_table
| apache-2.0 | 3,972,654,172,272,321,500 | 29.443114 | 79 | 0.590382 | false |
srkukarni/heron | heron/tools/cli/src/python/restart.py | 1 | 2128 | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' restart.py '''
from heron.common.src.python.utils.log import Log
import heron.tools.cli.src.python.args as args
import heron.tools.cli.src.python.cli_helper as cli_helper
import heron.tools.common.src.python.utils.config as config
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
parser = subparsers.add_parser(
'restart',
help='Restart a topology',
usage="%(prog)s [options] cluster/[role]/[env] <topology-name> [container-id]",
add_help=True)
args.add_titles(parser)
args.add_cluster_role_env(parser)
args.add_topology(parser)
parser.add_argument(
'container-id',
nargs='?',
type=int,
default=-1,
help='Identifier of the container to be restarted')
args.add_config(parser)
args.add_service_url(parser)
args.add_verbose(parser)
parser.set_defaults(subcommand='restart')
return parser
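# Illustrative note (not part of the original module): with the parser above, a
# hypothetical invocation looks like
#
#   heron restart local/ads/devel MyTopology 2
#
# where "local/ads/devel" is cluster/[role]/[env], "MyTopology" is the topology
# name and the trailing "2" is the optional container-id (default -1).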
# pylint: disable=unused-argument
def run(command, parser, cl_args, unknown_args):
'''
:param command:
:param parser:
:param cl_args:
:param unknown_args:
:return:
'''
Log.debug("Restart Args: %s", cl_args)
container_id = cl_args['container-id']
if cl_args['deploy_mode'] == config.SERVER_MODE:
dict_extra_args = {"container_id": str(container_id)}
return cli_helper.run_server(command, cl_args, "restart topology", extra_args=dict_extra_args)
else:
list_extra_args = ["--container_id", str(container_id)]
return cli_helper.run_direct(command, cl_args, "restart topology", extra_args=list_extra_args)
| apache-2.0 | -3,431,443,403,372,592,000 | 30.761194 | 98 | 0.705357 | false |
Muon/s3o-tools | s3o-optimize.py | 1 | 3528 | #!/usr/bin/env python
from s3o import S3O
from optparse import OptionParser
from glob import glob
import vertex_cache
def recursively_optimize_pieces(piece):
optimize_piece(piece)
for child in piece.children:
recursively_optimize_pieces(child)
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield tuple(l[i:i + n])
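# Illustrative example (not part of the original script): chunks() groups a flat
# index buffer into fixed-size tuples, e.g.
#
#     list(chunks([0, 1, 2, 3, 4, 5], 3)) == [(0, 1, 2), (3, 4, 5)]
#
# which is how optimize_piece() below turns a triangle index list into
# per-triangle tuples before running the vertex-cache optimizer.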
def optimize_piece(piece):
remap = {}
new_indices = []
for index in piece.indices:
vertex = piece.vertices[index]
if vertex not in remap:
remap[vertex] = len(remap)
new_indices.append(remap[vertex])
new_vertices = [(index, vertex) for vertex, index in remap.items()]
new_vertices.sort()
new_vertices = [vertex for index, vertex in new_vertices]
if piece.primitive_type == "triangles" and len(new_indices) > 0:
tris = list(chunks(new_indices, 3))
acmr = vertex_cache.average_transform_to_vertex_ratio(tris)
tmp = vertex_cache.get_cache_optimized_triangles(tris)
acmr_new = vertex_cache.average_transform_to_vertex_ratio(tmp)
if acmr_new < acmr:
new_indices = []
for tri in tmp:
new_indices.extend(tri)
vertex_map = []
remapped_indices = []
for index in new_indices:
try:
new_index = vertex_map.index(index)
except ValueError:
new_index = len(vertex_map)
vertex_map.append(index)
remapped_indices.append(new_index)
new_vertices = [new_vertices[index] for index in vertex_map]
new_indices = remapped_indices
piece.indices = new_indices
piece.vertices = new_vertices
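# Note added for illustration (not part of the original script): "acmr" is the
# average cache miss ratio reported by the vertex_cache module -- a lower value
# means the post-transform vertex cache gets reused more often -- so the
# reordered triangle list is only kept when it actually improves on the
# original ordering.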
def sizeof_fmt(num):
for x in ['bytes', 'KB', 'MB', 'GB']:
if abs(num) < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
if __name__ == '__main__':
parser = OptionParser(usage="%prog [options] FILES", version="%prog 0.1",
description="Optimize a Spring S3O file by "
"removing redundant data.")
parser.add_option("-d", "--dry-run", action="store_true",
default=False, dest="is_dry",
help="show output summary without committing changes")
parser.add_option("-q", "--quiet", action="store_true",
default=False, dest="silence_output",
help="silence detailed optimization output")
options, args = parser.parse_args()
if len(args) < 1:
parser.error("insufficient arguments")
dry = options.is_dry
silence_output = options.silence_output
if len(args) == 1:
filenames = glob(args[0])
else:
filenames = args
delta_total = 0
for filename in filenames:
with open(filename, 'rb+') as input_file:
data = input_file.read()
model = S3O(data)
recursively_optimize_pieces(model.root_piece)
optimized_data = model.serialize()
delta_size = len(optimized_data) - len(data)
delta_total += delta_size
if not silence_output:
print("modified %s: "
"size change: %+d bytes" % (filename, delta_size))
if not dry:
input_file.seek(0)
input_file.truncate()
input_file.write(optimized_data)
print("total size difference: %s" % sizeof_fmt(delta_total))
| mit | -5,973,406,678,418,618,000 | 29.413793 | 77 | 0.571995 | false |
pywinauto/pywinauto | examples/notepad_slow.py | 1 | 9097 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run some automations to test things"""
from __future__ import unicode_literals
from __future__ import print_function
import os.path
import sys
import time
try:
from pywinauto import application
except ImportError:
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
sys.path.append(pywinauto_path)
from pywinauto import application
from pywinauto import tests
from pywinauto.findbestmatch import MatchError
from pywinauto.timings import Timings
print("Setting timings to slow settings, may be necessary for")
print("slow applications or slow machines.")
Timings.slow()
#application.set_timing(3, .5, 10, .5, .4, .2, .2, .1, .2, .5)
def run_notepad():
"""Run notepad and do some small stuff with it"""
start = time.time()
app = application.Application()
## for distribution we don't want to connect to anybodies application
## because we may mess up something they are working on!
#try:
# app.connect_(path = r"c:\windows\system32\notepad.exe")
#except application.ProcessNotFoundError:
# app.start_(r"c:\windows\system32\notepad.exe")
app.start(r"notepad.exe")
app.Notepad.menu_select("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app.PageSetupDlg.SizeComboBox.select(4)
# Select the 'Letter' combobox item or the Letter
try:
app.PageSetupDlg.SizeComboBox.select("Letter")
except ValueError:
app.PageSetupDlg.SizeComboBox.select('Letter (8.5" x 11")')
app.PageSetupDlg.SizeComboBox.select(2)
# run some tests on the Dialog. List of available tests:
# "AllControls",
# "AsianHotkey",
# "ComboBoxDroppedHeight",
# "CompareToRefFont",
# "LeadTrailSpaces",
# "MiscValues",
# "Missalignment",
# "MissingExtraString",
# "Overlapping",
# "RepeatedHotkey",
# "Translation",
# "Truncation",
bugs = app.PageSetupDlg.run_tests('RepeatedHotkey Truncation')
# if there are any bugs they will be printed to the console
# and the controls will be highlighted
tests.print_bugs(bugs)
# ----- Next Page Setup Dialog ----
app.PageSetupDlg.Printer.click()
# do some radio button clicks
# Open the Connect to printer dialog so we can
# try out checking/unchecking a checkbox
app.PageSetupDlg.Network.click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app.ConnectToPrinter.ExpandByDefault.check()
app.ConnectToPrinter.ExpandByDefault.uncheck()
# try doing the same by using click
app.ConnectToPrinter.ExpandByDefault.click()
app.ConnectToPrinter.ExpandByDefault.click()
# close the dialog
app.ConnectToPrinter.Cancel.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.Properties.click()
doc_props = app.window(name_re = ".*Properties$")
doc_props.wait('exists', timeout=40)
# ----- Document Properties Dialog ----
# some tab control selections
# Two ways of selecting tabs with indices...
doc_props.TabCtrl.select(0)
doc_props.TabCtrl.select(1)
try:
doc_props.TabCtrl.select(2)
except IndexError:
# not all users have 3 tabs in this dialog
print('Skip 3rd tab selection...')
# or with text...
doc_props.TabCtrl.select("PaperQuality")
try:
doc_props.TabCtrl.select("JobRetention")
except MatchError:
# some people do not have the "Job Retention" tab
print('Skip "Job Retention" tab...')
# doc_props.TabCtrl.select("Layout")
#
# # do some radio button clicks
# doc_props.RotatedLandscape.click()
# doc_props.BackToFront.click()
# doc_props.FlipOnShortEdge.click()
#
# doc_props.Portrait.click()
# doc_props._None.click()
# doc_props.FrontToBack.click()
#
# # open the Advanced options dialog in two steps
# advbutton = doc_props.Advanced
# advbutton.click()
#
# # close the 4 windows
#
# # ----- Advanced Options Dialog ----
# app.window(name_re = ".* Advanced Options").Ok.click()
# ----- Document Properties Dialog again ----
doc_props.Cancel.close_click()
# for some reason my current printer driver
# window does not close cleanly :(
if doc_props.Cancel.exists():
doc_props.OK.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.OK.close_click()
# ----- Page Setup Dialog ----
app.PageSetupDlg.Ok.close_click()
# type some text - note that extended characters ARE allowed
app.Notepad.Edit.set_edit_text("I am typing s\xe4me text to Notepad\r\n\r\n"
"And then I am going to quit")
app.Notepad.Edit.right_click()
app.Popup.menu_item("Right To Left Reading Order").click()
#app.PopupMenu.menu_select("Paste", app.Notepad.ctrl_())
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#app.PopupMenu.menu_select("Show unicode control characters", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Insert Unicode control character -> IAFS", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.type_keys("{ESC}")
# the following shows that Sendtext does not accept
# accented characters - but does allow 'control' characters
app.Notepad.Edit.type_keys("{END}{ENTER}SendText d\xf6\xe9s "
u"s\xfcpp\xf4rt \xe0cce\xf1ted characters!!!", with_spaces = True)
# Try and save
app.Notepad.menu_select("File->SaveAs")
app.SaveAs.EncodingComboBox.select("UTF-8")
app.SaveAs.FileNameEdit.set_edit_text("Example-utf8.txt")
app.SaveAs.Save.close_click()
# my machine has a weird problem - when connected to the network
# the SaveAs Dialog appears - but doing anything with it can
# cause a LONG delay - the easiest thing is to just wait
# until the dialog is no longer active
# - Dialog might just be gone - because click worked
# - dialog might be waiting to disappear
# so can't wait for next dialog or for it to be disabled
# - dialog might be waiting to display message box so can't wait
# for it to be gone or for the main dialog to be enabled.
# while the dialog exists wait upto 30 seconds (and yes it can
# take that long on my computer sometimes :-( )
app.SaveAsDialog2.Cancel.wait_not('enabled')
# If file exists - it asks you if you want to overwrite
try:
app.SaveAs.Yes.wait('exists').close_click()
except MatchError:
print('Skip overwriting...')
# exit notepad
app.Notepad.menu_select("File->Exit")
#if not run_with_appdata:
# app.WriteAppData(os.path.join(scriptdir, "Notepad_fast.pkl"))
print("That took %.3f to run"% (time.time() - start))
if __name__ == "__main__":
run_notepad()
| bsd-3-clause | 718,532,393,781,119,500 | 34.388 | 95 | 0.665714 | false |
brguez/TEIBA | src/python/retrotransposition_eventTypes.chart.py | 1 | 14275 | #!/usr/bin/env python
#coding: utf-8
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
####### CLASSES #######
class cohort():
"""
.....................
Methods:
-
"""
def __init__(self):
"""
"""
self.VCFdict = {}
def read_VCFs(self, inputPath):
"""
"""
inputFile = open(inputPath, 'r')
info("Read input VCFs ")
# Per iteration, read a VCF, generate a VCF object and add it to the cohort
for line in inputFile:
line = line.rstrip('\n')
line = line.split("\t")
projectCode = line[0]
sampleId = line[1]
VCFfile = line[2]
# Create VCF object
VCFObj = formats.VCF()
info("Reading " + VCFfile + "...")
# Input VCF available
if os.path.isfile(VCFfile):
# Read VCF and add information to VCF object
VCFObj.read_VCF(VCFfile)
# Add projectCode and sampleId information to the genotype field in each MEI object
for MEIObject in VCFObj.lineList:
MEIObject.format = MEIObject.format + ':SAMPLEID'
MEIObject.genotype = MEIObject.genotype + ':' + sampleId
# Initialize the donor list for a given project if needed
if projectCode not in self.VCFdict:
self.VCFdict[projectCode] = []
# Add donor VCF to cohort
self.VCFdict[projectCode].append(VCFObj)
else:
print "[ERROR] Input file does not exist"
####### FUNCTIONS #######
def autolabel(rects, ax, valuesList):
# Get x-axis height to calculate label position from.
(x_left, x_right) = ax.get_xlim()
x_length = x_right - x_left
index = 0
for rect in rects:
value = valuesList[index]
ax.text(1.04*x_length, rect.get_y(),
'%d' % int(value),
ha='center', va='bottom', fontsize=8)
index += 1
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
from operator import itemgetter, attrgetter, methodcaller
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('inputPath', help='Tabular text file containing one row per sample with the following consecutive fields: projectCode sampleId vcf_path')
parser.add_argument('pseudoPath', help='[PROVISIONAL] Tabular text file containing one row per projectCode with the following consecutive fields: projectCode Nb.pseudogenes')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.')
args = parser.parse_args()
inputPath = args.inputPath
pseudoPath = args.pseudoPath
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "inputPath: ", inputPath
print "pseudoPath: ", pseudoPath
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
############# BAR CHART ###############
### 1. Initialize cohort object
cohortObj = cohort()
### 2. Read VCF files, create VCF objects and organize them
cohortObj.read_VCFs(inputPath)
### 3. Make a dictionary containing per tumor type the total number of events for each retrotransposition insertion type:
# * L1-solo
# * L1-td
# * Alu
# * SVA
# * ERVK
# * processed-pseudogene
eventCountsDict = {}
for projectCode in cohortObj.VCFdict:
## Initialize category counts
eventCountsDict[projectCode] = {}
eventCountsDict[projectCode]["L1-solo"] = 0
eventCountsDict[projectCode]["L1-transduction"] = 0
eventCountsDict[projectCode]["Alu"] = 0
eventCountsDict[projectCode]["SVA"] = 0
eventCountsDict[projectCode]["ERVK"] = 0
eventCountsDict[projectCode]["processed-pseudogene"] = 0
## Count total number of donors per tumor type and number of donor per category
for VCFObj in cohortObj.VCFdict[projectCode]:
for MEIObj in VCFObj.lineList:
MEIClass = MEIObj.infoDict["CLASS"]
MEIType = MEIObj.infoDict["TYPE"]
## a) L1-solo:
if (MEIClass == "L1") and (MEIType == "TD0"):
eventCountsDict[projectCode]["L1-solo"] += 1
## b) L1-transduction
elif (MEIType == "TD1") or (MEIType == "TD2"):
eventCountsDict[projectCode]["L1-transduction"] += 1
## c) Alu
# Note: I added a provisional SCORE filtering
# to ask for at least one breakpoint reconstructed
elif (MEIClass == "Alu") and (int(MEIObj.infoDict["SCORE"]) > 2):
eventCountsDict[projectCode]["Alu"] += 1
## d) SVA
# Note: I added a provisional SCORE filtering
# to ask for at least one breakpoint reconstructed
elif (MEIClass == "SVA") and (int(MEIObj.infoDict["SCORE"]) > 2):
eventCountsDict[projectCode]["SVA"] += 1
## e) ERVK
# Note: I added a provisional SCORE filtering
# to ask for at least one breakpoint reconstructed
elif (MEIClass == "ERVK") and (int(MEIObj.infoDict["SCORE"]) > 2):
eventCountsDict[projectCode]["ERVK"] += 1
## f) Processed pseudogene
elif (MEIClass == "PSD"):
eventCountsDict[projectCode]["processed-pseudogene"] += 1
## g) Unexpected value
#else:
# print MEIObj.infoDict["CLASS"], "[ERROR] Unexpected MEI Class value"
print "eventCountsDict: ", eventCountsDict
### 4. Make dataframe with the number of events per type for each tumor type
# Project codes: columns
# Event type number of events:
# ProjectCode1 ProjectCode2 ProjectCode3....
# L1-solo X1 Y1 Z1
# L1-transduction X2 Y2 Z2
# ...
eventCountsDataframe = pd.DataFrame(eventCountsDict)
print "eventCountsDataframe: ", eventCountsDataframe
### PROVISIONAL -- ADD PSEUDOGENE INFORMATION
pseudoCounts = open(pseudoPath, 'r')
# Read file line by line
for line in pseudoCounts:
line = line.rstrip('\r\n')
## Discard header
if not line.startswith("#"):
fieldsList = line.split("\t")
projectCode = str(fieldsList[0])
nbPseudogenes = fieldsList[1]
# Add nb. pseudogenes to the counts dataframe
eventCountsDataframe.set_value("processed-pseudogene", projectCode, nbPseudogenes)
print "eventCountsDataframePseudo: ", eventCountsDataframe
### 5. Make dataframe with the percentage of events per type for each tumor type
# Project codes: columns
# Categories % samples:
# ProjectCode1 ProjectCode2 ProjectCode3....
# L1-solo X1% Y1% Z1%
# L1-transduction X2% Y2% Z2%
nbEventsPerTumorTypeSerie = eventCountsDataframe.sum(axis=0)
print "nbEventsPerTumorTypeSerie:" , nbEventsPerTumorTypeSerie
eventTypes = eventCountsDataframe.index
projecCodes = eventCountsDataframe.columns
eventPercDataframe = pd.DataFrame(index=eventTypes, columns=projecCodes)
# Iterate over row index labels (activity categories)
for eventType in eventTypes:
# Iterate over column index labels (project codes)
for projectCode in projecCodes:
eventCountProjectCode = eventCountsDataframe.loc[eventType, projectCode]
nbEventsInTumortype = nbEventsPerTumorTypeSerie.loc[projectCode]
# Compute the percentage
eventPercTumorType = float(eventCountProjectCode)/float(nbEventsInTumortype) * 100
# Add source element contribution to dataframe
eventPercDataframe.set_value(eventType, projectCode, eventPercTumorType)
print "eventPercDataframe: ", eventPercDataframe
## Order dataframe columns (tumor types) in the same order as for the chart generated in "retrotranspositionRates.chart.py"
tumorTypeList = ['ESAD', 'HNSC', 'COAD', 'LUSC', 'STAD', 'UCEC', 'PRAD', 'MELA', 'BOCA', 'PACA', 'BRCA', 'LIRI', 'READ', 'CESC', 'OV', 'SARC', 'LIHC', 'GBM', 'THCA', 'BLCA', 'GACA', 'PAEN', 'KICH', 'BTCA', 'ORCA', 'SKCM', 'LINC', 'KIRP', 'LGG', 'LUAD', 'KIRC', 'DLBC', 'EOPC', 'LAML', 'RECA', 'CMDI', 'LICA', 'MALY', 'PBCA', 'CLLE']
# I need to reverse it to have the bars correctly placed...
tumorTypeList.reverse()
eventPercSortedDataframe = eventPercDataframe.reindex_axis(tumorTypeList, axis=1)
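# NOTE: DataFrame.reindex_axis() is deprecated in later pandas releases; the
# equivalent call there would be eventPercDataframe.reindex(columns=tumorTypeList).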
print "eventPercSortedDataframe: ", eventPercSortedDataframe
### 6. Make list per event type containing the percentage of events in each tumor category
# list 0 insertions [%ProjectCode1, %ProjectCode2, ... ]
# list 1-10 insertions [%ProjectCode1, %ProjectCode2, ... ]
# ...
AluList, ERVKList, L1SoloList, L1TDList, SVAList, PSDList = eventPercSortedDataframe.values.tolist()
### 7. Make ordered list containing the total number of insertions per tumor type
nbEventsPerTumorTypeSortedSerie = nbEventsPerTumorTypeSerie.reindex(tumorTypeList)
### 8. Make bar plot
# Note: I will not represent ERVK as we only have one insertion in the full PCAWG cohort...
ypos = np.arange(1, len(AluList) + 1) # the y locations for the groups
height = 0.75 # the width of the bars: can also be len(x) sequence
fig = plt.figure(figsize=(7, 12))
# fig.suptitle('Number of samples', fontsize=12)
ax = fig.add_subplot(111)
ax.yaxis.set_label_position("right")
plt.ylabel('Total number of MEI', fontsize=10, labelpad=40)
plt.xlabel('% MEI', fontsize=10)
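# The five barh() calls below build the stacked horizontal bars by hand: each
# series passes `left=` as the running sum of the series already drawn, so every
# segment starts where the previous one ends (L1-solo, then L1-transduction,
# Alu, SVA and processed-pseudogene).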
p1 = ax.barh(ypos, L1SoloList, color='#aed3e3', alpha=0.90, edgecolor='#000000', height=height, align='center')
p2 = ax.barh(ypos, L1TDList, color='#ed1f24', alpha=0.90, edgecolor='#000000', height=height, align='center',
left=[i for i in L1SoloList])
p3 = ax.barh(ypos, AluList, color='#59bd7d', alpha=0.90, edgecolor='#000000', height=height, align='center',
left=[i+j for i,j in zip(L1SoloList, L1TDList)])
p4 = ax.barh(ypos, SVAList, color='#faa41a', alpha=0.90, edgecolor='#000000', height=height, align='center',
left=[i+j+x for i,j,x in zip(L1SoloList, L1TDList, AluList)])
p5 = ax.barh(ypos, PSDList, color='#8B4513', alpha=0.80, edgecolor='#000000', height=height, align='center',
left=[i+j+x+z for i,j,x,z in zip(L1SoloList, L1TDList, AluList, SVAList)])
# Add a vertical grid to the plot (gridlines at the x-axis ticks), but make it
# very light in color so it helps reading data values without being distracting
ax.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
## Customize axis
plot_xmargin = 20
plot_ymargin = 4
x0, x1, y0, y1 = plt.axis()
plt.axis((x0,
x1 - plot_xmargin,
y0,
y1 - plot_ymargin))
## Customize ticks
plt.xticks(np.arange(0, 100.001, 10), fontsize=8)
plt.yticks(ypos, tumorTypeList, fontsize=8)
# Rotate them
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
## Add the number of samples per tumor type on the top of each bar
nbEventsPerTumorList = nbEventsPerTumorTypeSortedSerie.values.tolist()
print "nbEventsPerTumorTypeSortedSerie: ", nbEventsPerTumorTypeSortedSerie
autolabel(p5, ax, nbEventsPerTumorList) ## autolabel function
## Make legend
circle1 = mpatches.Circle((0, 0), 5, color='#aed3e3', alpha=0.90)
circle2 = mpatches.Circle((0, 0), 5, color='#ed1f24', alpha=0.90)
circle3 = mpatches.Circle((0, 0), 5, color='#59bd7d', alpha=0.90)
circle4 = mpatches.Circle((0, 0), 5, color='#faa41a', alpha=0.90)
circle5 = mpatches.Circle((0, 0), 5, color='#8B4513', alpha=0.90)
l = plt.figlegend((circle1, circle2, circle3, circle4, circle5), ("L1-solo", "L1-transduction", "Alu", "SVA", "processed-pseudogene"), loc = 'upper center', ncol=5, labelspacing=0.75, fontsize=8, fancybox=True)
## Save figure
fileName = outDir + "/PCAWG_retrotransposition_events_tumorTypes.pdf"
plt.savefig(fileName)
############# PIE CHART ###############
## 1. Gather data
regionsList = []
## For project code
for projectCode in cohortObj.VCFdict:
## For donor
for VCFObj in cohortObj.VCFdict[projectCode]:
## For MEI
for MEIObj in VCFObj.lineList:
if (MEIObj.infoDict['REGION']=="splicing") or (MEIObj.infoDict['REGION']=="upstream,downstream") or (MEIObj.infoDict['REGION']=="upstream") or (MEIObj.infoDict['REGION']=="downstream"):
region = "Other"
elif (MEIObj.infoDict['REGION']=="UTR5") or (MEIObj.infoDict['REGION']=="UTR3") or (MEIObj.infoDict['REGION']=="UTR5,UTR3") or (MEIObj.infoDict['REGION']=="UTR3,UTR5"):
region = "UTR"
elif (MEIObj.infoDict['REGION']=="ncRNA_exonic") or (MEIObj.infoDict['REGION']=="ncRNA_intronic") or (MEIObj.infoDict['REGION']=="ncRNA_splicing"):
region = "ncRNA"
else:
region = MEIObj.infoDict['REGION']
regionsList.append(region)
regionTuples = [(x, int(regionsList.count(x))) for x in set(regionsList)]
regionList = [list(t) for t in zip(*regionTuples)]
labels = regionList[0]
sizes = regionList[1]
## 2. Make pie chart
fig = plt.figure(figsize=(6,6))
fig.suptitle('Somatic MEI functional spectrum', fontsize=16)
colors = ['#008000', '#A67D3D', '#87CEFA', '#ff0000', '#FFD700', '#FFA500']
patches, texts, perc = plt.pie(sizes, colors=colors, startangle=90, autopct='%1.1f%%', pctdistance=1.2, labeldistance=1)
plt.legend(patches, labels, loc="best", fontsize=11)
##### Save figure
fileName = outDir + "/PCAWG_somatic_funcSpectrum_piechart.pdf"
plt.savefig(fileName)
## End ##
print
print "***** Finished! *****"
print
| gpl-3.0 | 7,796,183,208,989,563,000 | 33.589806 | 332 | 0.633359 | false |
davidwilson-85/easymap | graphic_output/Pillow-4.2.1/Tests/test_file_gimpgradient.py | 1 | 2789 | from helper import unittest, PillowTestCase
from PIL import GimpGradientFile
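# These helpers are GIMP's per-segment blending functions for .ggr gradient
# files: given the segment midpoint and a position inside the segment, each one
# returns the interpolation factor used to blend the segment's endpoint colours.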
class TestImage(PillowTestCase):
def test_linear_pos_le_middle(self):
# Arrange
middle = 0.5
pos = 0.25
# Act
ret = GimpGradientFile.linear(middle, pos)
# Assert
self.assertEqual(ret, 0.25)
def test_linear_pos_le_small_middle(self):
# Arrange
middle = 1e-11
pos = 1e-12
# Act
ret = GimpGradientFile.linear(middle, pos)
# Assert
self.assertEqual(ret, 0.0)
def test_linear_pos_gt_middle(self):
# Arrange
middle = 0.5
pos = 0.75
# Act
ret = GimpGradientFile.linear(middle, pos)
# Assert
self.assertEqual(ret, 0.75)
def test_linear_pos_gt_small_middle(self):
# Arrange
middle = 1 - 1e-11
pos = 1 - 1e-12
# Act
ret = GimpGradientFile.linear(middle, pos)
# Assert
self.assertEqual(ret, 1.0)
def test_curved(self):
# Arrange
middle = 0.5
pos = 0.75
# Act
ret = GimpGradientFile.curved(middle, pos)
# Assert
self.assertEqual(ret, 0.75)
def test_sine(self):
# Arrange
middle = 0.5
pos = 0.75
# Act
ret = GimpGradientFile.sine(middle, pos)
# Assert
self.assertEqual(ret, 0.8535533905932737)
def test_sphere_increasing(self):
# Arrange
middle = 0.5
pos = 0.75
# Act
ret = GimpGradientFile.sphere_increasing(middle, pos)
# Assert
self.assertAlmostEqual(ret, 0.9682458365518543)
def test_sphere_decreasing(self):
# Arrange
middle = 0.5
pos = 0.75
# Act
ret = GimpGradientFile.sphere_decreasing(middle, pos)
# Assert
self.assertEqual(ret, 0.3385621722338523)
def test_load_via_imagepalette(self):
# Arrange
from PIL import ImagePalette
test_file = "Tests/images/gimp_gradient.ggr"
# Act
palette = ImagePalette.load(test_file)
# Assert
# load returns raw palette information
self.assertEqual(len(palette[0]), 1024)
self.assertEqual(palette[1], "RGBA")
def test_load_1_3_via_imagepalette(self):
# Arrange
from PIL import ImagePalette
# GIMP 1.3 gradient files contain a name field
test_file = "Tests/images/gimp_gradient_with_name.ggr"
# Act
palette = ImagePalette.load(test_file)
# Assert
# load returns raw palette information
self.assertEqual(len(palette[0]), 1024)
self.assertEqual(palette[1], "RGBA")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -5,356,480,336,562,170,000 | 21.312 | 62 | 0.564001 | false |
irl/gajim | src/common/contacts.py | 1 | 32063 | # -*- coding:utf-8 -*-
## src/common/contacts.py
##
## Copyright (C) 2006 Dimitur Kirov <dkirov AT gmail.com>
## Travis Shirk <travis AT pobox.com>
## Nikos Kouremenos <kourem AT gmail.com>
## Copyright (C) 2006-2014 Yann Leboulanger <asterix AT lagaule.org>
## Jean-Marie Traissard <jim AT lapin.org>
## Copyright (C) 2007 Lukas Petrovicky <lukas AT petrovicky.net>
## Tomasz Melcer <liori AT exroot.org>
## Julien Pivotto <roidelapluie AT gmail.com>
## Copyright (C) 2007-2008 Stephan Erb <steve-e AT h3c.de>
## Copyright (C) 2008 Brendan Taylor <whateley AT gmail.com>
## Jonathan Schleifer <js-gajim AT webkeks.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
from functools import cmp_to_key
try:
from common import caps_cache
from common.account import Account
import common.gajim
except ImportError as e:
if __name__ != "__main__":
raise ImportError(str(e))
class XMPPEntity(object):
"""
Base representation of entities in XMPP
"""
def __init__(self, jid, account, resource):
self.jid = jid
self.resource = resource
self.account = account
class CommonContact(XMPPEntity):
def __init__(self, jid, account, resource, show, status, name,
our_chatstate, chatstate, client_caps=None):
XMPPEntity.__init__(self, jid, account, resource)
self.show = show
self.status = status
self.name = name
self.client_caps = client_caps or caps_cache.NullClientCaps()
# please read xep-85 http://www.xmpp.org/extensions/xep-0085.html
# this holds what WE SEND to contact (our current chatstate)
self.our_chatstate = our_chatstate
# this is contact's chatstate
self.chatstate = chatstate
def get_full_jid(self):
raise NotImplementedError
def get_shown_name(self):
raise NotImplementedError
def supports(self, requested_feature):
"""
Return True if the contact has advertised to support the feature
identified by the given namespace. False otherwise.
"""
if self.show == 'offline':
# Unfortunately, if all resources are offline, the contact
# includes the last resource that was online. Check for its
            # show, so we can be sure it exists. Otherwise, we still
# return caps for a contact that has no resources left.
return False
else:
return caps_cache.client_supports(self.client_caps, requested_feature)
class Contact(CommonContact):
"""
Information concerning a contact
"""
def __init__(self, jid, account, name='', groups=[], show='', status='',
sub='', ask='', resource='', priority=0, keyID='', client_caps=None,
our_chatstate=None, chatstate=None, last_status_time=None, msg_id=None,
last_activity_time=None):
if not isinstance(jid, str):
print('no str')
CommonContact.__init__(self, jid, account, resource, show, status, name,
our_chatstate, chatstate, client_caps=client_caps)
        self.contact_name = '' # nick chosen by contact
self.groups = [i for i in set(groups)] # filter duplicate values
self.sub = sub
self.ask = ask
self.priority = priority
self.keyID = keyID
self.msg_id = msg_id
self.last_status_time = last_status_time
self.last_activity_time = last_activity_time
self.pep = {}
def get_full_jid(self):
if self.resource:
return self.jid + '/' + self.resource
return self.jid
def get_shown_name(self):
if self.name:
return self.name
if self.contact_name:
return self.contact_name
return self.jid.split('@')[0]
def get_shown_groups(self):
if self.is_observer():
return [_('Observers')]
elif self.is_groupchat():
return [_('Groupchats')]
elif self.is_transport():
return [_('Transports')]
elif not self.groups:
return [_('General')]
else:
return self.groups
def is_hidden_from_roster(self):
"""
If contact should not be visible in roster
"""
# XEP-0162: http://www.xmpp.org/extensions/xep-0162.html
if self.is_transport():
return False
if self.sub in ('both', 'to'):
return False
if self.sub in ('none', 'from') and self.ask == 'subscribe':
return False
if self.sub in ('none', 'from') and (self.name or len(self.groups)):
return False
if _('Not in Roster') in self.groups:
return False
return True
def is_observer(self):
# XEP-0162: http://www.xmpp.org/extensions/xep-0162.html
is_observer = False
if self.sub == 'from' and not self.is_transport()\
and self.is_hidden_from_roster():
is_observer = True
return is_observer
def is_groupchat(self):
for account in common.gajim.gc_connected:
if self.jid in common.gajim.gc_connected[account]:
return True
return False
def is_transport(self):
# if not '@' or '@' starts the jid then contact is transport
return self.jid.find('@') <= 0
class GC_Contact(CommonContact):
"""
Information concerning each groupchat contact
"""
def __init__(self, room_jid, account, name='', show='', status='', role='',
affiliation='', jid='', resource='', our_chatstate=None,
chatstate=None):
CommonContact.__init__(self, jid, account, resource, show, status, name,
our_chatstate, chatstate)
self.room_jid = room_jid
self.role = role
self.affiliation = affiliation
def get_full_jid(self):
return self.room_jid + '/' + self.name
def get_shown_name(self):
return self.name
def as_contact(self):
"""
Create a Contact instance from this GC_Contact instance
"""
return Contact(jid=self.get_full_jid(), account=self.account,
name=self.name, groups=[], show=self.show, status=self.status,
sub='none', client_caps=self.client_caps)
class LegacyContactsAPI:
"""
This is a GOD class for accessing contact and groupchat information.
The API has several flaws:
* it mixes concerns because it deals with contacts, groupchats,
groupchat contacts and metacontacts
* some methods like get_contact() may return None. This leads to
        a lot of duplication all over Gajim because callers can never be sure
        whether they received a proper contact or just None.
It is a long way to cleanup this API. Therefore just stick with it
and use it as before. We will try to figure out a migration path.
"""
def __init__(self):
self._metacontact_manager = MetacontactManager(self)
self._accounts = {}
def change_account_name(self, old_name, new_name):
self._accounts[new_name] = self._accounts[old_name]
self._accounts[new_name].name = new_name
del self._accounts[old_name]
self._metacontact_manager.change_account_name(old_name, new_name)
def add_account(self, account_name):
self._accounts[account_name] = Account(account_name, Contacts(),
GC_Contacts())
self._metacontact_manager.add_account(account_name)
def get_accounts(self):
return self._accounts.keys()
def remove_account(self, account):
del self._accounts[account]
self._metacontact_manager.remove_account(account)
def create_contact(self, jid, account, name='', groups=[], show='',
status='', sub='', ask='', resource='', priority=0, keyID='',
client_caps=None, our_chatstate=None, chatstate=None, last_status_time=None,
last_activity_time=None):
# Use Account object if available
account = self._accounts.get(account, account)
return Contact(jid=jid, account=account, name=name, groups=groups,
show=show, status=status, sub=sub, ask=ask, resource=resource,
priority=priority, keyID=keyID, client_caps=client_caps,
our_chatstate=our_chatstate, chatstate=chatstate,
last_status_time=last_status_time,
last_activity_time=last_activity_time)
def create_self_contact(self, jid, account, resource, show, status, priority,
name='', keyID=''):
conn = common.gajim.connections[account]
nick = name or common.gajim.nicks[account]
account = self._accounts.get(account, account) # Use Account object if available
self_contact = self.create_contact(jid=jid, account=account,
name=nick, groups=['self_contact'], show=show, status=status,
sub='both', ask='none', priority=priority, keyID=keyID,
resource=resource)
self_contact.pep = conn.pep
return self_contact
def create_not_in_roster_contact(self, jid, account, resource='', name='',
keyID=''):
# Use Account object if available
account = self._accounts.get(account, account)
return self.create_contact(jid=jid, account=account, resource=resource,
name=name, groups=[_('Not in Roster')], show='not in roster',
status='', sub='none', keyID=keyID)
def copy_contact(self, contact):
return self.create_contact(contact.jid, contact.account,
name=contact.name, groups=contact.groups, show=contact.show,
status=contact.status, sub=contact.sub, ask=contact.ask,
resource=contact.resource, priority=contact.priority,
keyID=contact.keyID, client_caps=contact.client_caps,
our_chatstate=contact.our_chatstate, chatstate=contact.chatstate,
last_status_time=contact.last_status_time,
last_activity_time=contact.last_activity_time)
def add_contact(self, account, contact):
if account not in self._accounts:
self.add_account(account)
return self._accounts[account].contacts.add_contact(contact)
def remove_contact(self, account, contact):
if account not in self._accounts:
return
return self._accounts[account].contacts.remove_contact(contact)
def remove_jid(self, account, jid, remove_meta=True):
self._accounts[account].contacts.remove_jid(jid)
if remove_meta:
self._metacontact_manager.remove_metacontact(account, jid)
def get_contacts(self, account, jid):
return self._accounts[account].contacts.get_contacts(jid)
def get_contact(self, account, jid, resource=None):
return self._accounts[account].contacts.get_contact(jid, resource=resource)
def iter_contacts(self, account):
for contact in self._accounts[account].contacts.iter_contacts():
yield contact
def get_contact_from_full_jid(self, account, fjid):
return self._accounts[account].contacts.get_contact_from_full_jid(fjid)
def get_first_contact_from_jid(self, account, jid):
return self._accounts[account].contacts.get_first_contact_from_jid(jid)
def get_contacts_from_group(self, account, group):
return self._accounts[account].contacts.get_contacts_from_group(group)
def get_contacts_jid_list(self, account):
return self._accounts[account].contacts.get_contacts_jid_list()
def get_jid_list(self, account):
return self._accounts[account].contacts.get_jid_list()
def change_contact_jid(self, old_jid, new_jid, account):
return self._accounts[account].change_contact_jid(old_jid, new_jid)
def get_highest_prio_contact_from_contacts(self, contacts):
if not contacts:
return None
prim_contact = contacts[0]
for contact in contacts[1:]:
if int(contact.priority) > int(prim_contact.priority):
prim_contact = contact
return prim_contact
def get_contact_with_highest_priority(self, account, jid):
contacts = self.get_contacts(account, jid)
if not contacts and '/' in jid:
# jid may be a fake jid, try it
room, nick = jid.split('/', 1)
contact = self.get_gc_contact(account, room, nick)
return contact
return self.get_highest_prio_contact_from_contacts(contacts)
def get_nb_online_total_contacts(self, accounts=[], groups=[]):
"""
Return the number of online contacts and the total number of contacts
"""
if accounts == []:
accounts = self.get_accounts()
nbr_online = 0
nbr_total = 0
for account in accounts:
our_jid = common.gajim.get_jid_from_account(account)
for jid in self.get_jid_list(account):
if jid == our_jid:
continue
if common.gajim.jid_is_transport(jid) and not \
_('Transports') in groups:
# do not count transports
continue
if self.has_brother(account, jid, accounts) and not \
self.is_big_brother(account, jid, accounts):
# count metacontacts only once
continue
contact = self._accounts[account].contacts._contacts[jid][0]
if _('Not in roster') in contact.groups:
continue
in_groups = False
if groups == []:
in_groups = True
else:
for group in groups:
if group in contact.get_shown_groups():
in_groups = True
break
if in_groups:
if contact.show not in ('offline', 'error'):
nbr_online += 1
nbr_total += 1
return nbr_online, nbr_total
def __getattr__(self, attr_name):
# Only called if self has no attr_name
if hasattr(self._metacontact_manager, attr_name):
return getattr(self._metacontact_manager, attr_name)
else:
raise AttributeError(attr_name)
def create_gc_contact(self, room_jid, account, name='', show='', status='',
role='', affiliation='', jid='', resource=''):
account = self._accounts.get(account, account) # Use Account object if available
return GC_Contact(room_jid, account, name, show, status, role, affiliation, jid,
resource)
def add_gc_contact(self, account, gc_contact):
return self._accounts[account].gc_contacts.add_gc_contact(gc_contact)
def remove_gc_contact(self, account, gc_contact):
return self._accounts[account].gc_contacts.remove_gc_contact(gc_contact)
def remove_room(self, account, room_jid):
return self._accounts[account].gc_contacts.remove_room(room_jid)
def get_gc_list(self, account):
return self._accounts[account].gc_contacts.get_gc_list()
def get_nick_list(self, account, room_jid):
return self._accounts[account].gc_contacts.get_nick_list(room_jid)
def get_gc_contact(self, account, room_jid, nick):
return self._accounts[account].gc_contacts.get_gc_contact(room_jid, nick)
def is_gc_contact(self, account, jid):
return self._accounts[account].gc_contacts.is_gc_contact(jid)
def get_nb_role_total_gc_contacts(self, account, room_jid, role):
return self._accounts[account].gc_contacts.get_nb_role_total_gc_contacts(room_jid, role)
class Contacts():
"""
This is a breakout of the contact related behavior of the old
    Contacts class (which is now called LegacyContactsAPI)
"""
def __init__(self):
# list of contacts {jid1: [C1, C2]}, } one Contact per resource
self._contacts = {}
def add_contact(self, contact):
if contact.jid not in self._contacts:
self._contacts[contact.jid] = [contact]
return
contacts = self._contacts[contact.jid]
# We had only one that was offline, remove it
if len(contacts) == 1 and contacts[0].show == 'offline':
            # Do not use self.remove_contact: it deletes
# self._contacts[account][contact.jid]
contacts.remove(contacts[0])
# If same JID with same resource already exists, use the new one
for c in contacts:
if c.resource == contact.resource:
self.remove_contact(c)
break
contacts.append(contact)
def remove_contact(self, contact):
if contact.jid not in self._contacts:
return
if contact in self._contacts[contact.jid]:
self._contacts[contact.jid].remove(contact)
if len(self._contacts[contact.jid]) == 0:
del self._contacts[contact.jid]
def remove_jid(self, jid):
"""
Remove all contacts for a given jid
"""
if jid in self._contacts:
del self._contacts[jid]
def get_contacts(self, jid):
"""
Return the list of contact instances for this jid
"""
return self._contacts.get(jid, [])
def get_contact(self, jid, resource=None):
### WARNING ###
# This function returns a *RANDOM* resource if resource = None!
# Do *NOT* use if you need to get the contact to which you
# send a message for example, as a bare JID in Jabber means
# highest available resource, which this function ignores!
"""
        Return the contact instance for the given resource if one is given, else
        the first contact if no resource is given, or None if there is none
"""
if jid in self._contacts:
if not resource:
return self._contacts[jid][0]
for c in self._contacts[jid]:
if c.resource == resource:
return c
return self._contacts[jid][0]
def iter_contacts(self):
for jid in list(self._contacts.keys()):
for contact in self._contacts[jid][:]:
yield contact
def get_jid_list(self):
return list(self._contacts.keys())
def get_contacts_jid_list(self):
return [jid for jid, contact in self._contacts.items() if not
contact[0].is_groupchat()]
def get_contact_from_full_jid(self, fjid):
"""
Get Contact object for specific resource of given jid
"""
barejid, resource = common.gajim.get_room_and_nick_from_fjid(fjid)
return self.get_contact(barejid, resource)
def get_first_contact_from_jid(self, jid):
if jid in self._contacts:
return self._contacts[jid][0]
def get_contacts_from_group(self, group):
"""
Return all contacts in the given group
"""
group_contacts = []
for jid in self._contacts:
contacts = self.get_contacts(jid)
if group in contacts[0].groups:
group_contacts += contacts
return group_contacts
def change_contact_jid(self, old_jid, new_jid):
if old_jid not in self._contacts:
return
self._contacts[new_jid] = []
for _contact in self._contacts[old_jid]:
_contact.jid = new_jid
self._contacts[new_jid].append(_contact)
del self._contacts[old_jid]
class GC_Contacts():
def __init__(self):
# list of contacts that are in gc {room_jid: {nick: C}}}
self._rooms = {}
def add_gc_contact(self, gc_contact):
if gc_contact.room_jid not in self._rooms:
self._rooms[gc_contact.room_jid] = {gc_contact.name: gc_contact}
else:
self._rooms[gc_contact.room_jid][gc_contact.name] = gc_contact
def remove_gc_contact(self, gc_contact):
if gc_contact.room_jid not in self._rooms:
return
if gc_contact.name not in self._rooms[gc_contact.room_jid]:
return
del self._rooms[gc_contact.room_jid][gc_contact.name]
# It was the last nick in room ?
if not len(self._rooms[gc_contact.room_jid]):
del self._rooms[gc_contact.room_jid]
def remove_room(self, room_jid):
if room_jid in self._rooms:
del self._rooms[room_jid]
def get_gc_list(self):
return self._rooms.keys()
def get_nick_list(self, room_jid):
gc_list = self.get_gc_list()
if not room_jid in gc_list:
return []
return list(self._rooms[room_jid].keys())
def get_gc_contact(self, room_jid, nick):
nick_list = self.get_nick_list(room_jid)
if not nick in nick_list:
return None
return self._rooms[room_jid][nick]
def is_gc_contact(self, jid):
"""
>>> gc = GC_Contacts()
>>> gc._rooms = {'[email protected]' : {'test' : True}}
>>> gc.is_gc_contact('[email protected]/test')
True
>>> gc.is_gc_contact('[email protected]')
False
"""
jid = jid.split('/')
if len(jid) != 2:
return False
gcc = self.get_gc_contact(jid[0], jid[1])
        return gcc is not None
def get_nb_role_total_gc_contacts(self, room_jid, role):
"""
Return the number of group chat contacts for the given role and the total
number of group chat contacts
"""
if room_jid not in self._rooms:
return 0, 0
nb_role = nb_total = 0
for nick in self._rooms[room_jid]:
if self._rooms[room_jid][nick].role == role:
nb_role += 1
nb_total += 1
return nb_role, nb_total
class MetacontactManager():
def __init__(self, contacts):
self._metacontacts_tags = {}
self._contacts = contacts
def change_account_name(self, old_name, new_name):
self._metacontacts_tags[new_name] = self._metacontacts_tags[old_name]
del self._metacontacts_tags[old_name]
def add_account(self, account):
if account not in self._metacontacts_tags:
self._metacontacts_tags[account] = {}
def remove_account(self, account):
del self._metacontacts_tags[account]
def define_metacontacts(self, account, tags_list):
self._metacontacts_tags[account] = tags_list
def _get_new_metacontacts_tag(self, jid):
if not jid in self._metacontacts_tags:
return jid
        #FIXME: can this happen?
assert False
def iter_metacontacts_families(self, account):
for tag in self._metacontacts_tags[account]:
family = self._get_metacontacts_family_from_tag(account, tag)
yield family
def _get_metacontacts_tag(self, account, jid):
"""
Return the tag of a jid
"""
if not account in self._metacontacts_tags:
return None
for tag in self._metacontacts_tags[account]:
for data in self._metacontacts_tags[account][tag]:
if data['jid'] == jid:
return tag
return None
def add_metacontact(self, brother_account, brother_jid, account, jid, order=None):
tag = self._get_metacontacts_tag(brother_account, brother_jid)
if not tag:
tag = self._get_new_metacontacts_tag(brother_jid)
self._metacontacts_tags[brother_account][tag] = [{'jid': brother_jid,
'tag': tag}]
if brother_account != account:
common.gajim.connections[brother_account].store_metacontacts(
self._metacontacts_tags[brother_account])
# be sure jid has no other tag
old_tag = self._get_metacontacts_tag(account, jid)
while old_tag:
self.remove_metacontact(account, jid)
old_tag = self._get_metacontacts_tag(account, jid)
if tag not in self._metacontacts_tags[account]:
self._metacontacts_tags[account][tag] = [{'jid': jid, 'tag': tag}]
else:
if order:
self._metacontacts_tags[account][tag].append({'jid': jid,
'tag': tag, 'order': order})
else:
self._metacontacts_tags[account][tag].append({'jid': jid,
'tag': tag})
common.gajim.connections[account].store_metacontacts(
self._metacontacts_tags[account])
def remove_metacontact(self, account, jid):
if not account in self._metacontacts_tags:
return
found = None
for tag in self._metacontacts_tags[account]:
for data in self._metacontacts_tags[account][tag]:
if data['jid'] == jid:
found = data
break
if found:
self._metacontacts_tags[account][tag].remove(found)
common.gajim.connections[account].store_metacontacts(
self._metacontacts_tags[account])
break
def has_brother(self, account, jid, accounts):
tag = self._get_metacontacts_tag(account, jid)
if not tag:
return False
meta_jids = self._get_metacontacts_jids(tag, accounts)
return len(meta_jids) > 1 or len(meta_jids[account]) > 1
def is_big_brother(self, account, jid, accounts):
family = self.get_metacontacts_family(account, jid)
if family:
nearby_family = [data for data in family
if account in accounts]
bb_data = self._get_metacontacts_big_brother(nearby_family)
if bb_data['jid'] == jid and bb_data['account'] == account:
return True
return False
def _get_metacontacts_jids(self, tag, accounts):
"""
        Return all jids for the given tag in the form {acct: [jid1, jid2], ...}
"""
answers = {}
for account in self._metacontacts_tags:
if tag in self._metacontacts_tags[account]:
if account not in accounts:
continue
answers[account] = []
for data in self._metacontacts_tags[account][tag]:
answers[account].append(data['jid'])
return answers
def get_metacontacts_family(self, account, jid):
"""
Return the family of the given jid, including jid in the form:
[{'account': acct, 'jid': jid, 'order': order}, ] 'order' is optional
"""
tag = self._get_metacontacts_tag(account, jid)
return self._get_metacontacts_family_from_tag(account, tag)
def _get_metacontacts_family_from_tag(self, account, tag):
if not tag:
return []
answers = []
for account in self._metacontacts_tags:
if tag in self._metacontacts_tags[account]:
for data in self._metacontacts_tags[account][tag]:
data['account'] = account
answers.append(data)
return answers
def _compare_metacontacts(self, data1, data2):
"""
Compare 2 metacontacts
Data is {'jid': jid, 'account': account, 'order': order} order is
optional
"""
jid1 = data1['jid']
jid2 = data2['jid']
account1 = data1['account']
account2 = data2['account']
contact1 = self._contacts.get_contact_with_highest_priority(account1, jid1)
contact2 = self._contacts.get_contact_with_highest_priority(account2, jid2)
show_list = ['not in roster', 'error', 'offline', 'invisible', 'dnd',
'xa', 'away', 'chat', 'online', 'requested', 'message']
# contact can be null when a jid listed in the metacontact data
# is not in our roster
if not contact1:
if contact2:
return -1 # prefer the known contact
else:
show1 = 0
priority1 = 0
else:
show1 = show_list.index(contact1.show)
priority1 = contact1.priority
if not contact2:
if contact1:
return 1 # prefer the known contact
else:
show2 = 0
priority2 = 0
else:
show2 = show_list.index(contact2.show)
priority2 = contact2.priority
# If only one is offline, it's always second
if show1 > 2 and show2 < 3:
return 1
if show2 > 2 and show1 < 3:
return -1
if 'order' in data1 and 'order' in data2:
if data1['order'] > data2['order']:
return 1
if data1['order'] < data2['order']:
return -1
if 'order' in data1:
return 1
if 'order' in data2:
return -1
transport1 = common.gajim.get_transport_name_from_jid(jid1)
transport2 = common.gajim.get_transport_name_from_jid(jid2)
if transport2 and not transport1:
return 1
if transport1 and not transport2:
return -1
if show1 > show2:
return 1
if show2 > show1:
return -1
if priority1 > priority2:
return 1
if priority2 > priority1:
return -1
server1 = common.gajim.get_server_from_jid(jid1)
server2 = common.gajim.get_server_from_jid(jid2)
myserver1 = common.gajim.config.get_per('accounts', account1, 'hostname')
myserver2 = common.gajim.config.get_per('accounts', account2, 'hostname')
if server1 == myserver1:
if server2 != myserver2:
return 1
elif server2 == myserver2:
return -1
if jid1 > jid2:
return 1
if jid2 > jid1:
return -1
# If all is the same, compare accounts, they can't be the same
if account1 > account2:
return 1
if account2 > account1:
return -1
return 0
def get_nearby_family_and_big_brother(self, family, account):
"""
Return the nearby family and its Big Brother
Nearby family is the part of the family that is grouped with the
metacontact. A metacontact may be over different accounts. If accounts
are not merged then the given family is split account wise.
(nearby_family, big_brother_jid, big_brother_account)
"""
if common.gajim.config.get('mergeaccounts'):
# group all together
nearby_family = family
else:
# we want one nearby_family per account
nearby_family = [data for data in family if account == data['account']]
big_brother_data = self._get_metacontacts_big_brother(nearby_family)
big_brother_jid = big_brother_data['jid']
big_brother_account = big_brother_data['account']
return (nearby_family, big_brother_jid, big_brother_account)
def _get_metacontacts_big_brother(self, family):
"""
        Which member of the family will be the big brother under which all the
        others will be grouped?
"""
family.sort(key=cmp_to_key(self._compare_metacontacts))
return family[-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 | -6,566,727,521,522,041,000 | 36.282558 | 96 | 0.588747 | false |
davelab6/telaro | src/dash2/words/telaro/fileparse.py | 1 | 10711 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables. This is from O'Reilly's
# "Lex and Yacc", p. 63.
#
# Class-based example contributed to PLY by David McNab
# -----------------------------------------------------------------------------
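# Despite the header kept from PLY's calc.py example, this module actually
# tokenizes plain text, records word-to-word transitions in a Markov chain
# (see markov.py) and generates random paragraphs from them.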
import sys
sys.path.insert(0,"../..")
#import readline
import ply.lex as lex
import ply.yacc as yacc
import os
from getopt import getopt, GetoptError
import sys
from string import strip
from markov import Markov, random_sentence
def make_sentence(sentence, word_filter=None, max_chars=78):
res = ''
to_print = ''
while sentence:
word = sentence.pop(0)
if (word not in ' ()=-+.,:;\t?!"\'' and
word_filter and not word_filter.has_key(word.lower())):
continue
if word == '\x00':
res += to_print + "\n\n"
to_print = ''
continue
if len(to_print) + len(word) > max_chars:
res += to_print + "\n"
to_print = ''
if to_print:
if word in ' ()=-+.,:;\t?!"\'':
to_print += word
else:
to_print += ' ' + word
else:
to_print = word
if to_print:
res += to_print + "\n"
return res
class Parser:
"""
Base class for a lexer/parser that has the rules defined as methods
"""
tokens = ()
precedence = ()
def __init__(self, **kw):
self.debug = kw.get('debug', 0)
self.sentences = []
self.markov = Markov()
self.clause_starter = {}
self.para_starter = []
self.word_filter = kw.get('word_filter', None)
self.letter_priority = kw.get('letter_priority', None)
try:
modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
except:
modname = "parser"+"_"+self.__class__.__name__
self.debugfile = modname + ".dbg"
self.tabmodule = modname + "_" + "parsetab"
#print self.debugfile, self.tabmodule
# Build the lexer and parser
lex.lex(module=self, debug=self.debug)
yacc.yacc(module=self,
debug=self.debug,
debugfile=self.debugfile,
tabmodule=self.tabmodule)
def run(self, txt=None, para_starter=False):
if txt is None:
s = sys.stdin.read()
else:
s = txt
s = s.replace('\n\n', '\x00')
s = s.replace('\x00\x00', '\x00')
s = s.replace('\n\n', '')
s = s.replace('\n', ' ')
s = s.replace(' ', ' ')
yacc.parse(s)
print self.sentences
self.markov.printout()
print
print "clause starters"
keys = self.clause_starter.keys()
keys.sort()
for k in keys:
v = self.clause_starter[k]
print "\t", repr(k), v
print
print "para starters", self.para_starter
print
word_filter = self.word_filter
if self.letter_priority and word_filter:
# certain words are given a higher priority (multiplier)
# than others.
states = self.markov.states
for from_word, fp in states.items():
for to_word in fp.keys():
if word_filter.has_key(to_word.lower()):
fp[to_word] *= self.letter_priority
word_filter = None
self.markov.prepare()
if para_starter:
para_starters = None
else:
para_starters = self.para_starter
sentence = random_sentence(self.markov, 800,
starters=self.clause_starter,
para_starters=para_starters)
return make_sentence(sentence, word_filter=word_filter)
class Text(Parser):
tokens = (
'NAME',#'NUMBER',
'FULLSTOP',
'NULL',
#'LBRACK',
#'QUOTE',
#'RBRACK',
'COLON','SEMICOLON',
'EXCLAMATION','QUESTIONMARK',
'NEWLINE',
'TAB',
'SLASH',
'COMMA',
)
# Tokens
t_NULL = r'\x00'
t_FULLSTOP = r'\.'
#t_SPACE = r'\ '
t_COLON = r':'
t_SEMICOLON = r';'
t_NEWLINE = r'\n'
t_EXCLAMATION = r'!'
#t_QUOTE = r'[\'"]'
#t_LBRACK = r'\('
#t_RBRACK = r'\)'
t_QUESTIONMARK = r'\?'
t_TAB = r'\t'
t_COMMA = r','
t_SLASH = r'/'
t_NAME = r'[a-zA-Z0-9_][\'`a-zA-Z0-9_]*'
def _t_FLOAT(self, t):
r'\d+[\.]\d*'
try:
t.value = float(t.value)
except ValueError:
print "Integer value too large", t.value
t.value = 0
print "parsed number %s" % repr(t.value)
return t
def _t_NUMBER(self, t):
r'\d+'
try:
t.value = float(t.value)
except ValueError:
print "Integer value too large", t.value
t.value = 0
print "parsed number %s" % repr(t.value)
return t
t_ignore = " "
def _t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
print "Illegal character '%s'" % repr(t.value[0])
t.lexer.skip(1)
# Parsing rules
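    # PLY convention: each p_* method's docstring holds the grammar production it
    # implements; p[1..n] are the values of the matched symbols and p[0] receives
    # the value propagated to the enclosing rule.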
#precedence = (
# ('left','PLUS','MINUS'),
# ('left','TIMES','DIVIDE'),
# ('left', 'EXP'),
# ('right','UMINUS'),
# )
def p_text_para(self, p):
'text : paragraph'
self.sentences.append(p[1])
self.para_starter.append(p[1][0])
p[0] = [p[1]]
def p_text_paras(self, p):
'text : text NULL paragraph'
self.markov.inc_state_link(p[1][-1][-1], p[2])
self.markov.inc_state_link(p[2], p[3][0])
self.sentences.append(p[3])
self.para_starter.append(p[3][0])
p[0] = p[1] + [p[3]]
#print "join", repr(p[-1][-1][-1]), repr(p[2]), repr(p[3][0])
def p_paragraph_assign(self, p):
'paragraph : sentences'
#self.sentences.append(p[1])
#self.markov.inc_state_link(p[1][-1], p[2])
p[0] = p[1] #+ [p[2]]
def p_sentences_ended(self, p):
"""sentence : sentence clausedivider
"""
#if p[2] != '\n' or p[1][-1] in ':;,. \n':
self.markov.inc_state_link(p[1][-1], p[2])
p[0] = p[1] + [p[2]]
#def p_paradivider_expr(self, p):
# """paradivider : FULLSTOP NEWLINE
# """
# self.markov.inc_state_link(p[1], p[2])
# p[0] = [p[1], p[2]]
def p_sentenceending_prefixedtwice(self, p):
"""sentenceending : clausedivider clausedivider sentence
"""
#if p[1] != '\n' or p[2][0] in ':;,. \n':
self.markov.inc_state_link(p[1], p[2])
self.markov.inc_state_link(p[2], p[3][0])
if not self.clause_starter.has_key(p[2]):
self.clause_starter[p[2]] = []
self.clause_starter[p[2]].append(p[3][0])
if p[2] in '.?! \n':
self.para_starter.append(p[3][0])
p[0] = [p[1], p[2]] + p[3]
def p_sentenceending_prefixed(self, p):
"""sentenceending : clausedivider sentence
"""
#if p[1] != '\n' or p[2][0] in ':;,. \n':
self.markov.inc_state_link(p[1], p[2][0])
if not self.clause_starter.has_key(p[1]):
self.clause_starter[p[1]] = []
self.clause_starter[p[1]].append(p[2][0])
if p[1] in '.?! \n':
self.para_starter.append(p[2][0])
p[0] = [p[1]] + p[2]
def p_sentences_divided(self, p):
"""sentence : sentence sentenceending
"""
#if p[2][0] != '\n' or p[1][-1] in '\n.':
self.markov.inc_state_link(p[1][-1], p[2][0])
p[0] = p[1] + p[2]
def p_sentences_single(self, p):
"""sentences : sentence
"""
#print "single sentence", p[1]
p[0] = p[1]
def p_clausedivider_expr(self, p):
"""clausedivider : FULLSTOP
| COLON
| SEMICOLON
| TAB
| SLASH
| COMMA
| EXCLAMATION
| QUESTIONMARK
"""
p[0] = p[1]
def p_sentence_namesorlinks(self, p):
"""sentence : sentence NAME
"""
#print "sentence names", p[1], p[2]
self.markov.inc_state_link(p[1][-1], p[2])
p[0] = p[1] + [p[2]]
#def p_hyperlink_expr1(self, p):
# """hyperlink : NAME COLON SLASH SLASH namedots
# """
# p[0] = p[1]+"://"+p[5]
# print "hyperlink", p[0]
#def p_namedots_expr(self, p):
# """namedots : NAME FULLSTOP namedots
# """
# p[0] = p[1]+"."+p[3]
#def p_namedots_name(self, p):
# """namedots : NAME
# """
# p[0] = p[1]
def p_sentence_name(self, p):
"""sentence : NAME
"""
p[0] = [p[1]]
#def p_nameorhyp_exp(self, p):
# """nameorhyp : NAME
# | hyperlink"""
# p[0] = p[1]
def p_error(self, p):
if p:
print "Syntax error at '%s'" % repr(p.value)
else:
print "Syntax error at EOF"
def check_all_letters_in(letters, word):
for w in word:
if w.lower() not in letters and w.upper() not in letters:
return False
return True
if __name__ == '__main__':
letter_priority = 0
use_words = False
words_file = "/usr/share/dict/words"
letters = map(chr, range(65+32, 65+26+32)) + ["'`"]
try:
opts, args = getopt(sys.argv[1:], "l:d:hp:",
["letters=", "dictionary=", "letter-priority=",
"help"])
except GetoptError, message:
print "%s: %s" %(sys.argv[0], message)
usage()
exit(0)
for optind, optarg in opts:
if optind in ("--letter-priority", "-p"):
letter_priority = int(optarg)
elif optind in ("--dictionary", "-d"):
use_words = True
words_file = optarg
elif optind in ("--letters", "-l"):
letters = []
for l in optarg:
letters.append(l)
words = None
if use_words:
words = {}
for w in open(words_file).readlines():
w = w.strip()
if not letters:
words[w.lower()] = 1
else:
if check_all_letters_in(letters, w):
words[w.lower()] = 1
calc = Text(word_filter=words, letter_priority=letter_priority)
print calc.run()
| gpl-3.0 | 5,159,091,096,856,290,000 | 28.105978 | 101 | 0.469051 | false |
RoyGunhooYoon/mcb | mcb.py | 1 | 3638 | #!/usr/bin/python3
import shelve
import sys
import pyperclip
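# mcb ("multi-clipboard"): keyword -> clipboard text pairs are persisted in a
# shelve database file named 'mcb' in the working directory, while pyperclip
# moves text between that store and the system clipboard.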
print('\nWelcome to mcb!\n')
print('Type help to see manual\n')
while True:
args = input('>').split()
commands = ['help', 'list', 'load', 'save', 'quit', 'delete', 'show']
command = args[0]
mcb_shelve = shelve.open('mcb')
if command not in commands:
print("Unknown command, type help to see list of available commands.")
# Single command operations
else:
if command == 'quit':
print('Bye')
mcb_shelve.close()
sys.exit()
elif command == 'help':
doc = open('help.txt')
print(doc.read())
doc.close()
elif command == 'list':
if len(mcb_shelve) > 0:
for k in mcb_shelve:
print('Keyword: {} Overview: {}'.format(k, mcb_shelve[k][:30] + '...'))
else:
print("Could not find any keywords. Use save command to store clipboard into database.")
elif command == 'save':
try:
keyword = args[1]
content = pyperclip.paste()
if keyword in mcb_shelve:
ask = input("Key already exist. Do you want to override it? (y/n)")
if ask == 'y':
mcb_shelve[keyword] = pyperclip.paste()
print("Keyword override success. New content: {}"\
.format(content[:30] + '...'))
else:
print("Keyword override denied by user.")
else:
mcb_shelve[keyword] = pyperclip.paste()
print("Clipboard successfully saved with keyword\nContent: {}"\
.format(content[:30] + '...'))
except:
print("Please supply a keyword name to store clipboard content.")
elif command == 'load':
try:
keyword = args[1]
if keyword in mcb_shelve:
pyperclip.copy(mcb_shelve[keyword])
print("Content successfully copied to clipboard ctrl + v to paste.")
else:
print("Given keyword is not found. Type list to see available keywords.")
except:
print("Please supply keyword name to load stored clipboard.")
elif command == 'delete':
try:
keyword = args[1]
if keyword in mcb_shelve:
del mcb_shelve[keyword]
print("Keyword: {} and its content has been removed"\
.format(keyword))
elif keyword == '*':
ask = input("Are you sure you want to delete all keywords and its contents?(y/n)")
if ask == 'y':
                        for keyword in list(mcb_shelve.keys()):  # iterate over a copy of the keys while deleting
del mcb_shelve[keyword]
print("Deleted all keywords in database.")
else:
print("Request denied by user.")
else:
print("There are no matching keyword to delete.")
except:
print("Please supply keyword name that is to be deleted.")
elif command == 'show':
try:
keyword = args[1]
if keyword in mcb_shelve:
print(mcb_shelve[keyword])
else:
print("Given keyword is not found in database.")
except:
print("Please supply keyword name.")
| bsd-3-clause | 1,446,098,040,609,851,400 | 38.11828 | 104 | 0.474711 | false |
pbanaszkiewicz/amy | amy/autoemails/tests/test_admin_edit_template.py | 1 | 6114 | from datetime import date, timedelta
from django.test import TestCase
from django.urls import reverse
from rq.exceptions import NoSuchJobError
from autoemails import admin
from autoemails.actions import NewInstructorAction
from autoemails.job import Job
from autoemails.models import EmailTemplate, RQJob, Trigger
from autoemails.tests.base import FakeRedisTestCaseMixin, dummy_job
from workshops.models import Event, Organization, Person, Role, Task
from workshops.tests.base import SuperuserMixin
class TestAdminJobReschedule(SuperuserMixin, FakeRedisTestCaseMixin, TestCase):
def setUp(self):
super().setUp()
self._setUpSuperuser() # creates self.admin
# save scheduler and connection data
self._saved_scheduler = admin.scheduler
# overwrite
admin.scheduler = self.scheduler
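        # Swap the admin module's RQ scheduler for the fake-redis backed one from
        # FakeRedisTestCaseMixin so the tests run without a real Redis server;
        # tearDown() puts the original scheduler back.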
# fake RQJob
self.email = EmailTemplate.objects.create(slug="test-1")
self.trigger = Trigger.objects.create(
action="new-instructor", template=self.email
)
self.rqjob = RQJob.objects.create(job_id="fake-id", trigger=self.trigger)
self.new_template = "Welcome to AMY!"
# test event and task
LC_org = Organization.objects.create(
domain="librarycarpentry.org", fullname="Library Carpentry"
)
self.event = Event.objects.create(
slug="test-event",
host=Organization.objects.first(),
administrator=LC_org,
start=date.today() + timedelta(days=7),
end=date.today() + timedelta(days=8),
)
p = Person.objects.create(
personal="Harry", family="Potter", email="[email protected]"
)
r = Role.objects.create(name="instructor")
self.task = Task.objects.create(event=self.event, person=p, role=r)
def tearDown(self):
super().tearDown()
# bring back saved scheduler
admin.scheduler = self._saved_scheduler
def test_view_doesnt_allow_GET(self):
# log admin user
self._logSuperuserIn()
url = reverse("admin:autoemails_rqjob_edit_template", args=[self.rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 405) # Method not allowed
def test_view_access_by_anonymous(self):
url = reverse("admin:autoemails_rqjob_edit_template", args=[self.rqjob.pk])
rv = self.client.post(url)
self.assertEqual(rv.status_code, 302)
# cannot check by assertRedirect because there's additional `?next`
# parameter
self.assertTrue(rv.url.startswith(reverse("login")))
def test_view_access_by_admin(self):
# log admin user
self._logSuperuserIn()
# try accessing the view again
url = reverse("admin:autoemails_rqjob_edit_template", args=[self.rqjob.pk])
rv = self.client.post(url)
self.assertEqual(rv.status_code, 302)
self.assertRedirects(
rv, reverse("admin:autoemails_rqjob_preview", args=[self.rqjob.pk])
)
def test_no_such_job(self):
# log admin user
self._logSuperuserIn()
with self.assertRaises(NoSuchJobError):
Job.fetch(self.rqjob.job_id, connection=self.scheduler.connection)
url = reverse("admin:autoemails_rqjob_edit_template", args=[self.rqjob.pk])
rv = self.client.post(url, follow=True)
self.assertIn(
"The corresponding job in Redis was probably already executed",
rv.content.decode("utf-8"),
)
def test_job_not_in_scheduled_jobs_queue(self):
# log admin user
self._logSuperuserIn()
# case 1: job didn't go through RQ-Scheduler, but directly to Queue
job1 = self.queue.enqueue(dummy_job)
rqjob1 = RQJob.objects.create(job_id=job1.id, trigger=self.trigger)
Job.fetch(job1.id, connection=self.scheduler.connection) # no error
with self.connection.pipeline() as pipe:
pipe.watch(self.scheduler.scheduled_jobs_key)
self.assertIsNone(pipe.zscore(self.scheduler.scheduled_jobs_key, job1.id))
url = reverse("admin:autoemails_rqjob_edit_template", args=[rqjob1.pk])
payload = {
"template": self.new_template,
}
rv = self.client.post(url, payload, follow=True)
self.assertIn(
f"The job {job1.id} template cannot be updated.",
rv.content.decode("utf-8"),
)
# case 2: job is no longer in the RQ-Scheduler queue, but it was there!
job2 = self.scheduler.enqueue_in(
timedelta(minutes=5),
dummy_job,
)
rqjob2 = RQJob.objects.create(job_id=job2.id, trigger=self.trigger)
# move job to the queue so it's executed
self.scheduler.enqueue_job(job2)
Job.fetch(job2.id, connection=self.scheduler.connection) # no error
url = reverse("admin:autoemails_rqjob_edit_template", args=[rqjob2.pk])
rv = self.client.post(url, payload, follow=True)
self.assertIn(
f"The job {job2.id} template cannot be updated.",
rv.content.decode("utf-8"),
)
def test_job_template_updated_correctly(self):
# log admin user
self._logSuperuserIn()
action = NewInstructorAction(
self.trigger,
objects={"event": self.event, "task": self.task},
)
job = self.scheduler.enqueue_in(
timedelta(minutes=60),
action,
)
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
Job.fetch(job.id, connection=self.scheduler.connection) # no error
url = reverse("admin:autoemails_rqjob_edit_template", args=[rqjob.pk])
payload = {
"template": self.new_template,
}
rv = self.client.post(url, payload, follow=True)
self.assertIn(
f"The job {job.id} template was updated",
rv.content.decode("utf-8"),
)
job.refresh()
self.assertEqual(job.instance.template.body_template, "Welcome to AMY!")
| mit | 7,210,350,057,709,714,000 | 36.740741 | 86 | 0.627412 | false |
R-daneel-olivaw/CPET | module1/probes/Pobe.py | 1 | 7811 | '''
Created on Feb 25, 2015
@author: Akshat
'''
import psutil
from module1.ds.procRecord import ProcRecord
from time import sleep
import csv
import os, platform, subprocess, re
from subprocess import check_output
import copy
class ProcessProbe:
PROCNAME = None
p_map = {}
k_list = [0]
o_map = {}
def __init__(self, processName, pid=None, output_path=None, stepDelay=0.5):
if pid:
self.pid = int(pid)
else:
self.pid = None
self.PROCNAME = processName
self.stepDelay = stepDelay
self.output_path = output_path
def isMatch(self, proc, name):
return name in repr(proc)
def addToCSV(self, writer, mango):
# writer.appe(mango.getTime(), mango.getCpu(), mango.getMem())
seq = mango.toSequence()
writer.writerow(seq)
return
def getProcessNameForPid(self, pid):
p = psutil.Process(int(pid))
p_name = p.name()
return p_name
def getProcessForPid(self, pid):
p = psutil.Process(pid)
return p
def getPidForProcessName(self, procName):
for proc in psutil.process_iter():
if self.isMatch(proc, self.PROCNAME):
# print(proc)
procId = proc.pid
return procId
return 0
def getProbeTargetName(self):
return self.PROCNAME
def appendChildProcesses(self):
c_proc_id = copy.deepcopy(self.p_map)
parent_id = self.getPidForProcessName(self.PROCNAME)
# parent_id = 7832
c_proc_id[parent_id] = [parent_id]
# try:
c_process = psutil.Process(parent_id)
childs = c_process.children(recursive=True)
for chp in childs:
c_proc_id[parent_id].append(chp.pid)
c_proc_id[parent_id] = list(set(c_proc_id[parent_id]))
'''
except:
print('process ', p, 'lost')
continue
'''
self.p_map = c_proc_id
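        # p_map maps the monitored parent pid to a de-duplicated list containing
        # itself plus all of its current (recursive) child pids; it is rebuilt on
        # every polling step so newly spawned children are probed as well.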
def get_process(self, p):
if p not in self.o_map:
pr = psutil.Process(p)
self.o_map[p] = pr
return self.o_map[p]
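        # o_map caches one psutil.Process object per pid: cpu_percent with
        # interval=0 reports usage since the previous call on the same object,
        # so reusing the instance between polls is what makes the reading meaningful.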
def get_processor_speed(self):
if platform.system() == "Windows":
pro_info = check_output("wmic cpu get name,CurrentClockSpeed,MaxClockSpeed", shell=True)
pro_info = str(pro_info, "utf-8")
pro_info = pro_info.splitlines()[2]
pro_info = pro_info.split(sep=None, maxsplit=1)[0].strip()
return int(pro_info)
elif platform.system() == "Darwin":
import os
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
command = "sysctl -n machdep.cpu.brand_string"
print('os not supported')
print(subprocess.check_output(command).strip())
return 100
elif platform.system() == "Linux":
cpu_mhz = check_output("lscpu | grep MHz", shell=True)
cpu_mhz = str(cpu_mhz, 'utf-8')
f_cpu_mhz = float(cpu_mhz.split(':')[1].strip())
return f_cpu_mhz
print('os not supported')
return 100
def probe_process(self, p, rec):
try:
proc = self.get_process(p)
cpu = proc.get_cpu_percent(interval=0)
cpu_speed = self.get_processor_speed()
cpu = float("{0:.2f}".format(cpu_speed * (cpu / 100)))
mem = proc.get_memory_info()[0] / float(2 ** 20)
diskIo = proc.get_io_counters()
disk_rc = diskIo[0]
disk_wc = diskIo[1]
disk_rb = diskIo[2]
disk_wb = diskIo[3]
netc = len(proc.connections())
if not rec:
rec = ProcRecord(cpu, mem, disk_rc, disk_wc, disk_rb, disk_wb, netc, 0)
else:
rec.addCpu(cpu)
rec.addMem(mem)
rec.addReadc(disk_rc)
rec.addWritec(disk_wc)
rec.addReadb(disk_rb)
rec.addWriteb(disk_wb)
rec.addConnectionCount(netc)
rec.addChildCount(1)
print(p, 'cpu = ', cpu)
print(p, 'memory = ', mem)
print(p, 'disk_read_count = ', diskIo[0])
print(p, 'disk_write_count = ', diskIo[1])
print(p, 'disk_read_bytes = ', diskIo[2])
print(p, 'disk_write_bytes = ', diskIo[3])
print(p, 'network counters = ', netc)
print()
except:
print("process lost..")
self.k_list.append(p)
return rec
def startProbe(self):
parent_id = None
if self.pid:
self.PROCNAME = self.getProcessNameForPid(self.pid)
parent_id = self.pid
else :
parent_id = self.getPidForProcessName(self.PROCNAME)
print('STARTING PROBE FOR ', self.PROCNAME)
# parent_id = 7832
self.p_map[parent_id] = [parent_id]
# self.procId.append(self.getPidForProcessName(self.PROCNAME))
print(self.p_map)
# print(proc)
# print(proc.cpu_times())
try:
fileCsv = None
while True:
self.appendChildProcesses()
buffer = {}
for parent in self.p_map:
if psutil.pid_exists(parent) and parent != 0:
buffer[parent] = self.p_map[parent]
self.p_map = buffer
if not self.p_map:
break
for parent in self.p_map:
if self.pid:
fileCsv = open(self.output_path + self.PROCNAME + str(self.pid) + '.csv', 'a')
else:
fileCsv = open(self.output_path + self.PROCNAME + '.csv', 'a')
writer = csv.writer(fileCsv, delimiter=',', quoting=csv.QUOTE_NONE, lineterminator='\n')
if(parent not in self.k_list):
if psutil.pid_exists(parent):
p_childs = self.p_map[parent]
rec = None
for p in p_childs:
if(p not in self.k_list):
rec = self.probe_process(p, rec)
self.addToCSV(writer, rec)
else:
print('parent lost')
self.k_list.append(parent)
continue
sleep(self.stepDelay)
finally:
if fileCsv:
fileCsv.close()
print("Terminating...")
| lgpl-3.0 | -7,484,567,214,915,481,000 | 31.238298 | 118 | 0.428882 | false |
joaormatos/anaconda | mmfparser/player/extensions/kcclock.py | 1 | 26527 | # Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
"""
kcclock.mfx
Date and Time object - ClickTeam (http://www.clickteam.com)
Used to display date and time in various formats. Can act as a
stopwatch or countdown device.
Ported to Python by Mathias Kaerlev
"""
from mmfparser.player.extensions.common import UserExtension, HiddenObject
from mmfparser.player.event.actions.common import Action
from mmfparser.player.event.conditions.common import Condition
from mmfparser.player.event.expressions.common import Expression
# Actions
class Action0(Action):
"""
Set hundredths of seconds
Parameters:
0: Set hundredths of seconds (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
value = self.evaluate_index(0)
newTime = int(instance.objectPlayer.currentTime) + value / 100.0
instance.objectPlayer.currentTime = newTime
class SetTimeAction(Action):
def execute(self, instance):
new = self.evaluate_index(0)
value = instance.objectPlayer.get_struct_time()
self.action(value, new)
instance.objectPlayer.set_struct_time(value)
def action(self, value, new):
pass
class Action1(SetTimeAction):
"""
Set seconds
Parameters:
0: Set seconds (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
value.tm_sec = new
class Action2(SetTimeAction):
"""
Set minutes
Parameters:
0: Set minutes (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
value.tm_min = new
class Action3(SetTimeAction):
"""
Set hours
Parameters:
0: Set hours (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
        value.tm_hour = new
class Action4(SetTimeAction):
"""
Set day of week
Parameters:
0: Set day of week (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
value.tm_wday = new
class Action5(SetTimeAction):
"""
Set day of month
Parameters:
0: Set day of month (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
value.tm_mday = new
class Action6(SetTimeAction):
"""
Set month
Parameters:
0: Set month (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
value.tm_mon = new
class Action7(SetTimeAction):
"""
Set year
Parameters:
0: Set year (EXPRESSION, ExpressionParameter)
"""
def action(self, value, new):
value.tm_year = new
class Action8(Action):
"""
Stop watch->Reset stop watch to 00:00:00
"""
def execute(self, instance):
instance.objectPlayer.counting = None
instance.objectPlayer.currentTime = 0
class Action9(Action):
"""
Stop watch->Start stop watch
"""
def execute(self, instance):
instance.objectPlayer.counting = 1
class Action10(Action):
"""
Stop watch->Pause stop watch
"""
def execute(self, instance):
instance.objectPlayer.counting = None
class Action11(Action):
"""
Visibility->Make object reappear
"""
def execute(self, instance):
instance.visible = True
class Action12(Action):
"""
Visibility->Make object invisible
"""
def execute(self, instance):
instance.visible = False
class Action13(Action):
"""
Position->Select position...
Parameters:
0: Select position... (POSITION, Position)
"""
def execute(self, instance):
destX, destY, _ = self.get_positions(
self.get_parameter(0))[0]
instance.set_position(destX, destY, True)
class Action14(Action):
"""
Count down->Set count down
Parameters:
0: Set count down (TIME, Time)
"""
def execute(self, instance):
instance.objectPlayer.currentTime = self.get_time(self.get_parameter(0))
class Action15(Action):
"""
Count down->Start count down
"""
def execute(self, instance):
instance.objectPlayer.counting = -1
class Action16(Action):
"""
Count down->Pause count down
"""
def execute(self, instance):
instance.objectPlayer.counting = None
class Action17(Action):
"""
Position->Set Horizontal Position
Parameters:
0: Set Horizontal Position (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
x = self.evaluate_index(0)
instance.set_position(x, instance.y, True)
class Action18(Action):
"""
Position->Set Vertical Position
Parameters:
0: Set Vertical Position (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
y = self.evaluate_index(0)
instance.set_position(instance.x, y, True)
class Action19(Action):
"""
Size->Set Horizontal Size
Parameters:
0: Set Horizontal Size (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
width = self.evaluate_index(0)
instance.objectPlayer.resize(width = width)
class Action20(Action):
"""
Size->Set Vertical Size
Parameters:
0: Set Vertical Size (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
height = self.evaluate_index(0)
instance.objectPlayer.resize(height = height)
# Conditions
class Condition0(Condition):
"""
Compare to chrono
Parameters:
0: Compare to chrono (CMPTIME, CompareTime)
"""
def created(self):
parameter = self.get_parameter(0)
self.compareValue = parameter.comparison
self.seconds = parameter.timer / 1000.0
def check(self, instance):
return self.compare(instance.objectPlayer.currentTime, self.seconds)
class Condition1(Condition):
"""
New clock second ?
"""
def created(self):
self.add_handlers(second_changed = self.changed)
def changed(self):
self.generate()
def check(self, instance):
return self.isTriggered
class Condition2(Condition):
"""
New clock minute ?
"""
def created(self):
self.add_handlers(minute_changed = self.changed)
def changed(self):
self.generate()
def check(self, instance):
return self.isTriggered
class Condition3(Condition):
"""
New clock hour ?
"""
def created(self):
self.add_handlers(hour_changed = self.changed)
def changed(self):
self.generate()
def check(self, instance):
return self.isTriggered
class Condition4(Condition):
"""
New clock day ?
"""
def created(self):
self.add_handlers(day_changed = self.changed)
def changed(self):
self.generate()
def check(self, instance):
return self.isTriggered
class Condition5(Condition):
"""
New clock month ?
"""
def created(self):
self.add_handlers(month_changed = self.changed)
def changed(self):
self.generate()
def check(self, instance):
return self.isTriggered
class Condition6(Condition):
"""
New clock year ?
"""
def created(self):
self.add_handlers(year_changed = self.changed)
def changed(self):
self.generate()
def check(self, instance):
return self.isTriggered
class Condition7(Condition):
"""
Compare to count down
Parameters:
0: Compare to count down (CMPTIME, CompareTime)
"""
def created(self):
parameter = self.get_parameter(0)
self.compareValue = parameter.comparison
self.seconds = parameter.timer / 1000.0
def check(self, instance):
return self.compare(instance.objectPlayer.currentTime, self.seconds)
class Condition8(Condition):
"""
Is visible ?
"""
def check(self, instance):
return instance.visible
# Expressions
class Expression0(Expression):
"""
Retrieve hundredths of seconds
Return type: Int
"""
def get(self, instance):
val = instance.objectPlayer.currentTime
return int((val - int(val)) * 100)
class Expression1(Expression):
"""
Retrieve seconds
Return type: Int
"""
def get(self, instance):
        return instance.objectPlayer.get_struct_time().tm_sec
class Expression2(Expression):
"""
Retrieve minutes
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_struct_time().tm_min
class Expression3(Expression):
"""
Retrieve hours
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_struct_time().tm_hour
class Expression4(Expression):
"""
Retrieve day of week
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_struct_time().tm_wday
class Expression5(Expression):
"""
Retrieve day of month
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_struct_time().tm_mday
class Expression6(Expression):
"""
Retrieve month
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_struct_time().tm_mon
class Expression7(Expression):
"""
Retrieve year
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_struct_time().tm_year
class Expression8(Expression):
"""
Retrieve Stop watch time
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.currentTime
class Expression9(Expression):
"""
Retrieve analog clock data->X coordinate of clock centre
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_center(True)[0]
class Expression10(Expression):
"""
Retrieve analog clock data->Y coordinate of clock centre
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.get_center(True)[1]
def get_hand(instance, angle, is_hour = False):
radius = instance.objectPlayer.get_radius()
if is_hour:
radius /= 1.5
mid_x, mid_y = instance.objectPlayer.get_center(True)
x_value = math.cos(angle)
y_value = -math.sin(angle)
return (mid_x + x_value * radius, mid_y + y_value * radius)
class Expression11(Expression):
"""
Retrieve analog clock data->X coordinate of hour hand's end
Return type: Int
"""
def get(self, instance):
second, minute, hour = instance.objectPlayer.get_time()
angle = get_hour_angle(hour + minute / 60.0)
        return get_hand(instance, angle, True)[0]
class Expression12(Expression):
"""
Retrieve analog clock data->Y coordinate of hour hand's end
Return type: Int
"""
def get(self, instance):
second, minute, hour = instance.objectPlayer.get_time()
angle = get_hour_angle(hour + minute / 60.0)
        return get_hand(instance, angle, True)[1]
class Expression13(Expression):
"""
Retrieve analog clock data->X coordinate of minute hand's end
Return type: Int
"""
def get(self, instance):
second, minute, hour = instance.objectPlayer.get_time()
angle = get_second_minute_angle(minute + second / 60.0)
return get_hand(instance, angle)[0]
class Expression14(Expression):
"""
Retrieve analog clock data->Y coordinate of minute hand's end
Return type: Int
"""
def get(self, instance):
second, minute, hour = instance.objectPlayer.get_time()
angle = get_second_minute_angle(minute + second / 60.0)
return get_hand(instance, angle)[1]
class Expression15(Expression):
"""
Retrieve analog clock data->X coordinate of second hand's end
Return type: Int
"""
def get(self, instance):
second, minute, hour = instance.objectPlayer.get_time()
angle = get_second_minute_angle(second)
return get_hand(instance, angle)[0]
class Expression16(Expression):
"""
Retrieve analog clock data->Y coordinate of second hand's end
Return type: Int
"""
def get(self, instance):
second, minute, hour = instance.objectPlayer.get_time()
angle = get_second_minute_angle(second)
return get_hand(instance, angle)[1]
class Expression17(Expression):
"""
Retrieve Count down time
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.currentTime
class Expression18(Expression):
"""
X Position of Clock
Return type: Int
"""
def get(self, instance):
return instance.x
class Expression19(Expression):
"""
Y Position of Clock
Return type: Int
"""
def get(self, instance):
return instance.y
class Expression20(Expression):
"""
X Size of Clock
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.width
class Expression21(Expression):
"""
Y Size of Clock
Return type: Int
"""
def get(self, instance):
return instance.objectPlayer.height
from mmfparser.data.font import LogFont
ANALOG_CLOCK = 0
DIGITAL_CLOCK = 1
INVISIBLE = 2
CALENDAR = 3
CLOCK = 0
STOPWATCH = 1
COUNTDOWN = 2
SHORTDATE = 0
LONGDATE = 1
FIXEDDATE = 2
import datetime
import calendar
import time
import math
from pyglet.gl import (glTranslatef, glPushMatrix, glPopMatrix, glBegin,
glEnd, glVertex2f, glColor3ub, GL_LINES, glLineWidth, glEnable,
glDisable, GL_LINE_SMOOTH, GL_POINT_SMOOTH, GL_LINE_LOOP)
from pyglet.graphics import vertex_list
from mmfparser.player.common import make_ellipse_vertices
def get_pointer_angle(value):
return math.radians(360.0 * value)
def get_mark_angle(i):
return get_pointer_angle((i + 1) / 12.0)
def get_hour_angle(i):
return -get_pointer_angle(i / 12.0 - 0.25)
def get_second_minute_angle(i):
return -get_pointer_angle(i / 60.0 - 0.25)
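# Worked example (added for clarity, not part of the original extension):
# get_pointer_angle(0.25) == pi/2, so get_second_minute_angle(0) == pi/2 and
# get_second_minute_angle(15) == 0, while get_hour_angle(3) == 0 -- i.e. the
# 3 o'clock hour hand lies along the +x axis from the clock centre before the
# y-flip applied in get_hand().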
roman_characters = [ "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX",
"X", "XI", "XII"]
def display_pointer(angle, color, radius):
x_value = math.cos(angle)
y_value = math.sin(angle)
glBegin(GL_LINES)
glColor3ub(*color)
glVertex2f(0.0, 0.0)
glVertex2f(x_value * radius, y_value * radius)
glEnd()
class DefaultObject(HiddenObject):
clockLabels = None
border = None
formatString = None
label = None
text = None
counting = None
currentTime = 0
oldTime = None
def created(self, data):
self.width = data.readShort(True)
self.height = data.readShort(True)
data.skipBytes(4 * 16)
displayType = self.displayType = data.readShort(True)
self.clockMode = data.readShort(True)
self.drawBorder = data.readShort() != 0
self.drawLines = data.readShort() != 0
analogClockMarkerType = data.readShort(True)
font = LogFont(data)
if font.height in (8, -8) and font.faceName.lower() == 'system':
font.height = -13
font.weight = 700
color = self.color = data.readColor()
data.skipBytes(40)
self.displaySeconds = data.readShort(True) != 0
self.secondsColor = data.readColor()
self.displayMinutes = data.readShort(True) != 0
self.minutesColor = data.readColor()
self.displayHours = data.readShort(True) != 0
self.hoursColor = data.readColor()
digitalClockType = data.readShort(True)
calenderType = data.readShort(True)
calenderFormat = data.readShort(True)
data.skipBytes(40)
if self.clockMode == COUNTDOWN:
countHours = data.readShort(True)
countMinutes = data.readShort(True)
countSeconds = data.readShort(True)
self.currentTime = (countSeconds + countMinutes * 60 +
countHours * 60 * 60)
elif self.clockMode == CLOCK:
self.currentTime = time.time()
minWidth = data.readShort(True)
minHeight = data.readShort(True)
if displayType == ANALOG_CLOCK:
if analogClockMarkerType != 2:
self.clockLabels = []
for i in xrange(1, 13):
if analogClockMarkerType == 0:
text = str(i)
else:
text = roman_characters[i-1]
label = self.create_label(font, text, color)
label.width = label.content_width
label.height = label.content_height
label.y = label.content_height / 2
label.x = -label.content_width / 2
self.clockLabels.append(label)
self.make_border()
elif displayType == DIGITAL_CLOCK:
if digitalClockType == 0:
formatString = '%(hour)s:%(minute)s'
elif digitalClockType == 1:
formatString = '%(hour)s:%(minute)s:%(second)s'
elif digitalClockType == 2:
formatString = '%(full_hour)s:%(minute)s'
elif digitalClockType == 3:
formatString = '%(full_hour)s:%(minute)s:%(second)s'
self.formatString = formatString
elif displayType == CALENDAR:
if calenderType == SHORTDATE:
formatString = '%d-%m-%Y'
elif calenderType == LONGDATE:
formatString = '%d. %B %Y'
else:
if calenderFormat == 0:
formatString = '%d/%m/%y'
elif calenderFormat == 1:
formatString = '%d %B %Y'
elif calenderFormat == 2:
formatString = '%d %B, %Y'
elif calenderFormat == 3:
formatString = '%B %d, %Y'
elif calenderFormat == 4:
formatString = '%d-%b-%y'
elif calenderFormat == 5:
formatString = '%B, %y'
elif calenderFormat == 6:
formatString = '%b-%Y'
self.formatString = formatString
if displayType in (DIGITAL_CLOCK, CALENDAR):
label = self.label = self.create_label(font, '', color,
multiline = True)
label.height = self.height
label.width = self.width
label.content_valign = 'center'
label.set_style('align', 'center')
label.x = label.y = 0
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POINT_SMOOTH)
self.updateEnabled = True
def update(self):
if (self.counting is not None or self.clockMode == CLOCK or
self.displayType == CALENDAR):
self.currentTime += self.player.sinceLast * (self.counting or 1)
self.currentTime = max(0, self.currentTime)
val = self.get_struct_time()
old_val = self.oldTime
if old_val is not None:
            if val.tm_sec != old_val.tm_sec:
                self.fire_handler('second_changed')
            if val.tm_min != old_val.tm_min:
                self.fire_handler('minute_changed')
            if val.tm_hour != old_val.tm_hour:
                self.fire_handler('hour_changed')
if val.tm_yday != old_val.tm_yday:
self.fire_handler('day_changed')
if val.tm_mon != old_val.tm_mon:
self.fire_handler('month_changed')
if val.tm_year != old_val.tm_year:
self.fire_handler('year_changed')
self.oldTime = val
def make_border(self):
if self.drawBorder:
if self.border is not None:
self.border.delete()
radius = min(self.width, self.height) / 2.0 - 20
vertices = []
for item in make_ellipse_vertices(radius * 2, radius * 2):
vertices += item
self.border = vertex_list(len(vertices) / 2,
('v2f', vertices),
('c3B', self.color * (len(vertices) / 2)))
def get_center(self, not_gl = False):
mid_x = self.width / 2.0
mid_y = self.height / 2.0
if not_gl:
return (self.parent.x + mid_x, self.parent.y + mid_y)
else:
return (self.x + mid_x, self.y - mid_y)
def get_radius(self):
return min(self.width, self.height) / 2.0 - 20
def draw(self):
mid_x, mid_y = self.get_center()
glLineWidth(2)
if self.displayType == ANALOG_CLOCK:
radius = min(self.width, self.height) / 2.0 - 10
radius_end = radius - 10
glPushMatrix()
glTranslatef(mid_x, mid_y, 0)
if self.clockLabels is not None or self.drawLines:
for i in xrange(0, 12):
glPushMatrix()
angle = get_mark_angle(i + 1)
x_value = math.cos(angle)
y_value = math.sin(angle)
if self.drawLines:
glBegin(GL_LINES)
glColor3ub(*self.color)
glVertex2f(x_value * radius_end, y_value * radius_end)
glVertex2f(x_value * (radius - 20),
y_value * (radius - 20))
glEnd()
if self.clockLabels is not None:
x = x_value * radius
y = y_value * radius
glTranslatef(x, y, 0)
self.clockLabels[-i].draw()
glPopMatrix()
# second pointer
second, minute, hour = self.get_time()
if self.displaySeconds:
display_pointer(get_second_minute_angle(
second),
self.secondsColor, radius_end)
if self.displayHours:
display_pointer(get_hour_angle(hour + minute / 60.0),
self.hoursColor, radius_end / 1.5)
if self.displayMinutes:
display_pointer(get_second_minute_angle(
minute + second / 60.0), self.minutesColor,
radius_end)
glPopMatrix()
if self.border is not None:
glPushMatrix()
glTranslatef(self.x + 20, self.y - 20, 0.0)
self.border.draw(GL_LINE_LOOP)
glPopMatrix()
elif self.displayType in (DIGITAL_CLOCK, CALENDAR):
text = self.get_text()
if text != self.text:
self.label.text = text
self.text = text
glPushMatrix()
glTranslatef(self.x, self.y, 0)
self.label.draw()
if self.displayType == DIGITAL_CLOCK and self.drawBorder:
glBegin(GL_LINE_LOOP)
glColor3ub(*self.color)
glVertex2f(0, 0)
glVertex2f(self.width, 0)
glVertex2f(self.width, -self.height)
glVertex2f(0, -self.height)
glEnd()
glPopMatrix()
def get_text(self):
if self.displayType == CALENDAR:
return time.strftime(self.formatString,
self.get_struct_time())
else:
second, minute, full_hour = self.get_time(False)
hour = full_hour % 12
return self.formatString % {
'second' : '%02d' % second,
'minute' : '%02d' % minute,
'full_hour' : '%02d' % full_hour,
'hour' : '%02d' % hour
}
def set_struct_time(self, value):
if self.displayType != CALENDAR and self.clockMode != CLOCK:
            self.currentTime = calendar.timegm(value)
else:
self.currentTime = time.mktime(value)
def get_struct_time(self):
if self.displayType != CALENDAR and self.clockMode != CLOCK:
return time.gmtime(self.currentTime)
else:
return time.localtime(self.currentTime)
def get_time(self, micro_precision = True):
val = self.get_struct_time()
second = val.tm_sec + self.currentTime - int(self.currentTime)
return (second, val.tm_min, val.tm_hour)
def resize(self, width = None, height = None):
self.width = width or self.width
self.height = height or self.height
self.make_border()
class kcclock(UserExtension):
objectPlayer = DefaultObject
actions = {
0 : Action0,
1 : Action1,
2 : Action2,
3 : Action3,
4 : Action4,
5 : Action5,
6 : Action6,
7 : Action7,
8 : Action8,
9 : Action9,
10 : Action10,
11 : Action11,
12 : Action12,
13 : Action13,
14 : Action14,
15 : Action15,
16 : Action16,
17 : Action17,
18 : Action18,
19 : Action19,
20 : Action20,
}
conditions = {
0 : Condition0,
1 : Condition1,
2 : Condition2,
3 : Condition3,
4 : Condition4,
5 : Condition5,
6 : Condition6,
7 : Condition7,
8 : Condition8,
}
expressions = {
0 : Expression0,
1 : Expression1,
2 : Expression2,
3 : Expression3,
4 : Expression4,
5 : Expression5,
6 : Expression6,
7 : Expression7,
8 : Expression8,
9 : Expression9,
10 : Expression10,
11 : Expression11,
12 : Expression12,
13 : Expression13,
14 : Expression14,
15 : Expression15,
16 : Expression16,
17 : Expression17,
18 : Expression18,
19 : Expression19,
20 : Expression20,
21 : Expression21,
}
extension = kcclock()
def get_extension():
return extension
| gpl-3.0 | 847,755,223,363,182,600 | 25.876393 | 80 | 0.580541 | false |
galaxyproject/gravity | gravity/cli.py | 1 | 8901 | """ Command line utilities for managing Galaxy servers
"""
from __future__ import print_function
import os
import sys
import json
import logging
import subprocess
from argparse import ArgumentParser
from .config_manager import ConfigManager
from .process_manager.supervisor_manager import SupervisorProcessManager
log = logging.getLogger(__name__)
DEFAULT_STATE_DIR = '~/.galaxy'
class GalaxyCLI(object):
""" Manage Galaxy server configurations and processes
"""
description = __doc__.strip()
def __init__(self):
self.arg_parser = None
self.args = None
self.__config_manager = None
self.__process_manager = None
def _configure_logging(self):
if self.args.debug:
level = logging.DEBUG
else:
level = logging.INFO
# Don't log full exceptions without -d
log.exception = log.error
logging.basicConfig(format='%(levelname)-8s: %(message)s', level=level)
@property
def state_dir(self):
if self.args.state_dir is not None:
state_dir = self.args.state_dir
elif 'GRAVITY_STATE_DIR' in os.environ:
state_dir = os.environ['GRAVITY_STATE_DIR']
else:
state_dir = DEFAULT_STATE_DIR
return os.path.abspath(os.path.expanduser(state_dir))
def parse_arguments(self):
self.arg_parser = ArgumentParser(description=self.description)
self.arg_parser.add_argument("-d", "--debug", default=False, action='store_true', help="Show debugging messages")
self.arg_parser.add_argument("--state-dir", default=None, help="Where process management configs and state will be stored (default: $GRAVITY_STATE_DIR or ~/.galaxy)")
        python = subprocess.check_output(['python', '-c', 'import sys; print(sys.executable)']).decode().strip()
        self.arg_parser.add_argument("-p", "--python-exe", default=None, help="The Python interpreter to use to create the virtualenv (default: %s)" % python)
sub_arg_parsers = self.arg_parser.add_subparsers(dest='subcommand', help='SUBCOMMANDS')
# Add parsers for config subcommands
arg_parser_add = sub_arg_parsers.add_parser('add', help='Register config file(s)')
arg_parser_add.add_argument("config", nargs='+', help='Config files to register')
arg_parser_list = sub_arg_parsers.add_parser('list', help='List registered config files')
arg_parser_get = sub_arg_parsers.add_parser('get', help='Get registered config file details')
arg_parser_get.add_argument("config", help='Config file')
arg_parser_instances = sub_arg_parsers.add_parser('instances', help='List known instances and services')
arg_parser_rename = sub_arg_parsers.add_parser('rename', help='Rename config file')
arg_parser_rename.add_argument("rename_config_old", help='Old config file path')
arg_parser_rename.add_argument("rename_config_new", help='New config file path')
arg_parser_remove = sub_arg_parsers.add_parser('remove', help='Deregister config file(s)')
arg_parser_remove.add_argument("config", nargs='+', help='Config files or instance names to deregister')
# Add parsers for admin subcommands
arg_parser_status = sub_arg_parsers.add_parser('status', help='Display server status')
arg_parser_start = sub_arg_parsers.add_parser('start', help='Start configured services')
arg_parser_start.add_argument("instance", nargs='*', help='Instance(s) to start')
arg_parser_stop = sub_arg_parsers.add_parser('stop', help='Stop configured services')
arg_parser_stop.add_argument("instance", nargs='*', help='Instance(s) to stop')
arg_parser_restart = sub_arg_parsers.add_parser('restart', help='Restart configured services')
arg_parser_restart.add_argument("instance", nargs='*', help='Instance(s) to restart')
arg_parser_reload = sub_arg_parsers.add_parser('reload', help='Reload configured services')
arg_parser_reload.add_argument("instance", nargs='*', help='Instance(s) to reload')
arg_parser_graceful = sub_arg_parsers.add_parser('graceful', help='Gracefully reload configured services')
arg_parser_graceful.add_argument("instance", nargs='*', help='Instance(s) to gracefully reload')
arg_parser_shutdown = sub_arg_parsers.add_parser('shutdown', help='Stop all services and supervisord')
arg_parser_update = sub_arg_parsers.add_parser('update', help='Update process manager from config changes')
arg_parser_supervisorctl = sub_arg_parsers.add_parser('supervisorctl', help='Invoke supervisorctl directly')
arg_parser_supervisorctl.add_argument("supervisorctl_args", nargs='*', help='supervisorctl subcommand (optional)')
self.args = self.arg_parser.parse_args()
@property
def config_manager(self):
if self.__config_manager is None:
self.__config_manager = ConfigManager(self.state_dir, python_exe=self.args.python_exe)
return self.__config_manager
@property
def start_supervisord(self):
return self.args.subcommand not in ('shutdown', 'status')
@property
def process_manager(self):
if self.__process_manager is None:
self.__process_manager = SupervisorProcessManager(start_supervisord=self.start_supervisord)
return self.__process_manager
def main(self):
self.parse_arguments()
self._configure_logging()
# Handle the specified operation
if self.args.subcommand == 'add':
try:
self.config_manager.add(self.args.config)
except Exception as exc:
log.exception("Adding config failed: %s", exc)
sys.exit(1)
elif self.args.subcommand == 'list':
registered = self.config_manager.get_registered_configs()
if registered:
print('%-12s %-24s %s' % ('TYPE', 'INSTANCE NAME', 'CONFIG PATH'))
for config in sorted(registered.keys()):
print('%-12s %-24s %s' % (registered[config].get('config_type', 'unknown'), registered[config].get('instance_name', 'unknown'), config))
else:
print('No config files registered')
elif self.args.subcommand == 'instances':
configs = self.config_manager.get_registered_configs()
instances = self.config_manager.get_registered_instances()
if instances:
print('%-24s %-10s %-10s %s' % ('INSTANCE NAME', 'TYPE', 'SERVER', 'NAME'))
# not the most efficient...
for instance in instances:
instance_str = instance
for config in configs.values():
if config['instance_name'] == instance:
for service in config['services']:
print('%-24s %-10s %-10s %s' % (instance_str, service.config_type, service.service_type, service.service_name))
instance_str = ''
if instance_str == instance:
print('%-24s no services configured' % instance)
else:
print('No known instances')
elif self.args.subcommand == 'get':
config = self.config_manager.get_registered_config(os.path.abspath(os.path.expanduser(self.args.config)))
if config is None:
print('%s not found' % self.args.config)
else:
print(json.dumps(config, sort_keys=True, indent=4, separators=(',', ': ')))
elif self.args.subcommand == 'rename':
try:
self.config_manager.rename(self.args.rename_config_old, self.args.rename_config_new)
except Exception as exc:
log.exception("Renaming config failed: %s", exc)
sys.exit(1)
elif self.args.subcommand == 'remove':
try:
self.config_manager.remove(self.args.config)
except Exception as exc:
log.exception("Remove config failed: %s", exc)
sys.exit(1)
elif self.args.subcommand == 'supervisorctl':
self.process_manager.supervisorctl(*self.args.supervisorctl_args)
elif self.args.subcommand == 'update':
            # TODO: update could use a -f (force) option to wipe and rewrite all
# supervisor configs. warn that this could affect other instances
self.process_manager.update()
elif self.args.subcommand in ('status', 'shutdown'):
getattr(self.process_manager, self.args.subcommand)()
elif self.args.subcommand in ('start', 'stop', 'restart', 'reload', 'graceful'):
getattr(self.process_manager, self.args.subcommand)(self.args.instance)
def galaxy():
g = GalaxyCLI()
g.main()
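# Example invocations (illustrative only; assumes the installed console script
# for GalaxyCLI.main() is named `galaxy`):
#
#   galaxy add config/galaxy.ini      # register a config file
#   galaxy list                       # show registered configs
#   galaxy update                     # sync process manager configs
#   galaxy start                      # start all configured instances
#   galaxy supervisorctl status       # pass through to supervisorctl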
| mit | -4,710,819,700,069,754,000 | 49.862857 | 174 | 0.624874 | false |
aarontuor/cpp | safekit/graph_training_utils.py | 1 | 5196 | """
Utilities for training the parameters of tensorflow computational graphs.
"""
import tensorflow as tf
import sys
import math
OPTIMIZERS = {'grad': tf.train.GradientDescentOptimizer, 'adam': tf.train.AdamOptimizer}
class EarlyStop:
"""
A class for determining when to stop a training while loop by a bad count criterion.
If the data is exhausted or the model's performance hasn't improved for *badlimit* training
steps, the __call__ function returns false. Otherwise it returns true.
"""
def __init__(self, badlimit=20):
"""
:param badlimit: Limit of for number of training steps without improvement for early stopping.
"""
self.badlimit = badlimit
self.badcount = 0
self.current_loss = sys.float_info.max
def __call__(self, mat, loss):
"""
Returns a boolean for customizable stopping criterion.
For first loop iteration set loss to sys.float_info.max.
:param mat: Current batch of features for training.
:param loss: Current loss during training.
:return: boolean, True when mat is not None and self.badcount < self.badlimit and loss != inf, nan.
"""
if mat is None:
sys.stderr.write('Done Training. End of data stream.')
cond = False
elif math.isnan(loss) or math.isinf(loss):
            sys.stderr.write('Exiting due to divergence: %s\n\n' % loss)
cond = False
elif loss > self.current_loss:
self.badcount += 1
if self.badcount >= self.badlimit:
sys.stderr.write('Exiting. Exceeded max bad count.')
cond = False
else:
cond = True
else:
self.badcount = 0
cond = True
self.current_loss = loss
return cond
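# Illustrative sketch added for clarity (not part of the original module): a
# typical loop driven by EarlyStop. `stream` (with a next_batch() method that
# returns a feature matrix or None) and the 'features' key are hypothetical
# stand-ins; `model` is assumed to be a ModelRunner and `loss_tensor` the loss
# op from the computational graph.
def _example_training_loop(stream, model, loss_tensor, badlimit=20):
    stopper = EarlyStop(badlimit)
    mat = stream.next_batch()
    loss = sys.float_info.max  # per EarlyStop docs, start at float max
    while stopper(mat, loss):
        datadict = {'features': mat}
        model.train_step(datadict)
        loss = model.eval(datadict, loss_tensor)
        mat = stream.next_batch()
    return stopper.current_loss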
class ModelRunner:
"""
A class for gradient descent training tensorflow models.
"""
def __init__(self, loss, ph_dict, learnrate=0.01, opt='adam', debug=False):
"""
:param loss: The objective function for optimization strategy.
:param ph_dict: A dictionary of names (str) to tensorflow placeholders.
:param learnrate: The step size for gradient descent.
:param opt: A tensorflow op implementing the gradient descent optimization strategy.
:param debug: Whether or not to print debugging info.
"""
self.loss = loss
self.ph_dict = ph_dict
self.debug = debug
self.train_op = OPTIMIZERS[opt](learnrate).minimize(loss)
self.init = tf.initialize_all_variables()
self.sess = tf.Session()
self.sess.run(self.init)
def train_step(self, datadict):
"""
Performs a training step of gradient descent with given optimization strategy.
:param datadict: A dictionary of names (str) matching names in ph_dict to numpy matrices for this mini-batch.
"""
self.sess.run(self.train_op, feed_dict=get_feed_dict(datadict, self.ph_dict, debug=self.debug))
def eval(self, datadict, eval_tensors):
"""
        Evaluates tensors without affecting the parameters of the model.
:param datadict: A dictionary of names (str) matching names in ph_dict to numpy matrices for this mini-batch.
:param eval_tensors: Tensors from computational graph to evaluate as numpy matrices.
:return: A list of evaluated tensors as numpy matrices.
"""
return self.sess.run(eval_tensors, feed_dict=get_feed_dict(datadict, self.ph_dict, train=0, debug=self.debug))
def get_feed_dict(datadict, ph_dict, train=1, debug=False):
"""
Function for pairing placeholders of a tensorflow computational graph with numpy arrays.
:param datadict: A dictionary with keys matching keys in ph_dict, and values are numpy arrays.
:param ph_dict: A dictionary where the keys match keys in datadict and values are placeholder tensors.
    :param train: {1,0}. Different values get fed to the dropout-probability and batch-norm placeholders
        depending on whether the model is training or evaluating.
:param debug: (boolean) Whether or not to print dimensions of contents of placeholderdict, and datadict.
:return: A feed dictionary with keys of placeholder tensors and values of numpy matrices.
"""
fd = {ph_dict[key]: datadict[key] for key in ph_dict}
dropouts = tf.get_collection('dropout_prob')
bn_deciders = tf.get_collection('bn_deciders')
if dropouts:
for prob in dropouts:
if train == 1:
fd[prob[0]] = prob[1]
else:
fd[prob[0]] = 1.0
if bn_deciders:
fd.update({decider: [train] for decider in bn_deciders})
if debug:
for desc in ph_dict:
print('%s\n\tph: %s\t%s\tdt: %s\t%s' % (desc,
ph_dict[desc].get_shape().as_list(),
ph_dict[desc].dtype,
datadict[desc].shape,
datadict[desc].dtype))
print(fd.keys())
return fd
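# Minimal illustrative example added for clarity (not part of the original
# module). The placeholder names and shapes below are invented; it only shows
# the ph_dict/datadict pairing that get_feed_dict expects (TF1-style API).
def _example_feed_dict():
    import numpy as np
    x_ph = tf.placeholder(tf.float32, [None, 10], name='features')
    y_ph = tf.placeholder(tf.float32, [None, 1], name='targets')
    ph_dict = {'features': x_ph, 'targets': y_ph}
    datadict = {'features': np.zeros((32, 10), dtype=np.float32),
                'targets': np.zeros((32, 1), dtype=np.float32)}
    # Placeholders registered in the 'dropout_prob' / 'bn_deciders' collections
    # are filled in automatically based on the train flag.
    return get_feed_dict(datadict, ph_dict, train=1)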
| mit | 1,544,231,019,683,145,700 | 39.913386 | 118 | 0.613934 | false |
ThomasMcVay/MediaApp | AppCoreX/__init__.py | 1 | 1275 | #===============================================================================
# @Author: Madison Aster
# @ModuleDescription:
# @License:
# MediaApp Library - Python Package framework for developing robust Media
# Applications with Qt Library
# Copyright (C) 2013 Madison Aster
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation;
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See LICENSE in the root directory of this library for copy of
# GNU Lesser General Public License and other license details.
#===============================================================================
from .Core import * | lgpl-2.1 | 5,790,089,494,029,108,000 | 47.115385 | 83 | 0.614902 | false |
xiaxia47/Python-learning | spiders/downloadpic.py | 1 | 1376 | import os
from urllib.request import urlretrieve
from urllib.request import urlopen
from bs4 import BeautifulSoup
def getAbsoluteUrl(baseUrl, source):
if source.startswith("http://www."):
url= "http://" + source[11:]
elif source.startswith("http://"):
url = source
elif source.startswith("www."):
url = source[4:]
url = "http://" + source
else:
url = baseUrl + "/" + source
if baseUrl not in url:
return None
return url
def getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):
path = absoluteUrl.replace("www.", "")
path = path.replace(baseUrl, "")
path = downloadDirectory + path
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return path
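# Worked example (added for clarity; the file path is illustrative):
# getDownloadPath("http://pythonscraping.com",
#                 "http://pythonscraping.com/img/lrg/logo.jpg",
#                 "download") returns "download/img/lrg/logo.jpg" and creates
# the "download/img/lrg" directory if it does not already exist.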
downloadDirectory = 'D:/Python Learning/download/'
baseUrl = "http://pythonscraping.com"
html = urlopen("http://www.pythonscraping.com")
bsObj = BeautifulSoup(html,"html.parser")
#imageLocation = bsObj.find("a", {"id": "logo"}).find("img")["src"]
#urlretrieve(imageLocation, "logo.jpg")
downloadList = bsObj.findAll(src=True)
for download in downloadList:
fileUrl = getAbsoluteUrl(baseUrl, download["src"])
if fileUrl is not None:
print(fileUrl)
#urlretrieve(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory))
| gpl-3.0 | 7,759,273,243,587,410,000 | 31.560976 | 75 | 0.65625 | false |
rshk/python-pcapng | pcapng/flags.py | 1 | 5513 | """
Module to wrap an integer in bitwise flag/field accessors.
"""
from collections import OrderedDict
from collections.abc import Iterable
from pcapng._compat import namedtuple
class FlagBase(object):
"""\
Base class for flag types to be used in a Flags object.
Handles the bitwise math so subclasses don't have to worry about it.
"""
__slots__ = [
"owner",
"offset",
"size",
"extra",
"mask",
]
def __init__(self, owner, offset, size, extra=None):
if size < 1:
raise TypeError("Flag must be at least 1 bit wide")
if size > owner._nbits:
raise TypeError("Flag must fit into owner size")
self.owner = owner
self.offset = offset
self.size = size
self.extra = extra
self.mask = ((1 << self.size) - 1) << self.offset
def get_bits(self):
return (self.owner._value & self.mask) >> self.offset
def set_bits(self, val):
val &= (1 << self.size) - 1
self.owner._value &= ~self.mask
self.owner._value |= val << self.offset
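# Worked example of the bit twiddling above (added for clarity, not part of
# the original module): a flag at offset=2 with size=3 has mask == 0b11100;
# set_bits(5) clears those three bits in the owner's value and ORs in
# 0b101 << 2, and get_bits() then recovers 5.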
class FlagBool(FlagBase):
"""Object representing a single boolean flag"""
def __init__(self, owner, offset, size, extra=None):
if size != 1:
raise TypeError(
"{cls} can only be 1 bit in size".format(cls=self.__class__.__name__)
)
super(FlagBool, self).__init__(owner, offset, size)
def get(self):
return bool(self.get_bits())
def set(self, val):
self.set_bits(int(bool(val)))
class FlagUInt(FlagBase):
"""\
Object representing an unsigned integer of the given size stored in
a larger bitfield
"""
def get(self):
return self.get_bits()
def set(self, val):
self.set_bits(val)
class FlagEnum(FlagBase):
"""\
Object representing a range of values stored in part of a larger
bitfield
"""
def __init__(self, owner, offset, size, extra=None):
if not isinstance(extra, Iterable):
raise TypeError(
"{cls} needs an iterable of values".format(cls=self.__class__.__name__)
)
extra = list(extra)
if len(extra) > 2 ** size:
raise TypeError(
"{cls} iterable has too many values (got {got}, "
"{size} bits only address {max})".format(
cls=self.__class__.__name__,
got=len(extra),
size=size,
max=2 ** size,
)
)
super(FlagEnum, self).__init__(owner, offset, size, extra)
def get(self):
val = self.get_bits()
try:
return self.extra[val]
except IndexError:
return "[invalid value]"
def set(self, val):
if val in self.extra:
self.set_bits(self.extra.index(val))
elif isinstance(val, int):
self.set_bits(val)
else:
raise TypeError(
"Invalid value {val} for {cls}".format(
val=val, cls=self.__class__.__name__
)
)
# Class representing a single flag schema for FlagWord.
# 'nbits' defaults to 1, and 'extra' defaults to None.
FlagField = namedtuple(
"FlagField", ("name", "ftype", "nbits", "extra"), defaults=(1, None)
)
class FlagWord(object):
"""\
Class to wrap an integer in bitwise flag/field accessors.
"""
__slots__ = [
"_nbits",
"_value",
"_schema",
]
def __init__(self, schema, nbits=32, initial=0):
"""
:param schema:
A list of FlagField objects representing the values to be packed
into this object, in order from LSB to MSB of the underlying int
:param nbits:
An integer representing the total number of bits used for flags
:param initial:
The initial integer value of the flags field
"""
self._nbits = nbits
self._value = initial
self._schema = OrderedDict()
tot_bits = sum([item.nbits for item in schema])
if tot_bits > nbits:
raise TypeError(
"Too many fields for {nbits}-bit field "
"(schema defines {tot} bits)".format(nbits=nbits, tot=tot_bits)
)
bitn = 0
for item in schema:
if not isinstance(item, FlagField):
raise TypeError("Schema must be composed of FlagField objects")
if not issubclass(item.ftype, FlagBase):
raise TypeError("Expected FlagBase, got {}".format(item.ftype))
self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)
bitn += item.nbits
def __int__(self):
return self._value
def __repr__(self):
rv = "<{0} (value={1})".format(self.__class__.__name__, self._value)
for k, v in self._schema.items():
rv += " {0}={1}".format(k, v.get())
return rv + ">"
def __getattr__(self, name):
try:
v = self._schema[name]
except KeyError:
raise AttributeError(name)
return v.get()
def __setattr__(self, name, val):
try:
return object.__setattr__(self, name, val)
except AttributeError:
pass
try:
v = self._schema[name]
except KeyError:
raise AttributeError(name)
return v.set(val)
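# Illustrative usage sketch added for clarity; the field names below are
# invented for the example and do not describe any real pcapng block.
def _example_flag_word():
    schema = [
        FlagField('inout', FlagEnum, 2, ['NA', 'inbound', 'outbound']),
        FlagField('casts', FlagUInt, 4),
        FlagField('err_crc', FlagBool),
    ]
    flags = FlagWord(schema, nbits=32, initial=0)
    flags.inout = 'outbound'   # stored as index 2 in bits 0-1
    flags.casts = 9            # unsigned value in bits 2-5
    flags.err_crc = True       # single bit 6
    assert int(flags) == (2 << 0) | (9 << 2) | (1 << 6)
    return flags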
| apache-2.0 | 8,224,031,001,209,343,000 | 27.127551 | 87 | 0.534736 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/test/test_color_siting_test.py | 1 | 1466 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.color_siting_test import ColorSitingTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestColorSitingTest(unittest.TestCase):
"""ColorSitingTest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ColorSitingTest
            include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.color_siting_test.ColorSitingTest() # noqa: E501
if include_optional :
return ColorSitingTest(
color_siting = 'CoSiting',
reject_on_error = True,
checked = True
)
else :
return ColorSitingTest(
)
def testColorSitingTest(self):
"""Test ColorSitingTest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | -6,175,895,556,517,951,000 | 25.654545 | 94 | 0.635744 | false |
roaet/quark | quark/db/migration/alembic/versions/79b768afed65_rename_tenant_id_indexes.py | 1 | 6050 | """rename tenant id indexes
Revision ID: 79b768afed65
Revises: 271cce54e15b
Create Date: 2015-05-20 21:39:19.348638
"""
# revision identifiers, used by Alembic.
revision = '79b768afed65'
down_revision = '271cce54e15b'
from alembic import op
import sqlalchemy as sa
from neutron.api.v2 import attributes as attr
_INSPECTOR = None
def get_inspector():
"""Reuse inspector"""
global _INSPECTOR
if _INSPECTOR:
return _INSPECTOR
else:
bind = op.get_bind()
_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
return _INSPECTOR
def get_tables():
tables = [
'quark_tags',
'quark_routes',
'quark_dns_nameservers',
'quark_security_group_rules',
'quark_security_groups',
'quark_ports',
'quark_mac_addresses',
'quark_ip_policy',
'quark_subnets',
'quark_networks',
'quark_async_transactions',
'quotas',
'address_scopes',
'floatingips',
'meteringlabels',
'networkrbacs',
'networks',
'ports',
'qos_policies',
'qospolicyrbacs',
'reservations',
'routers',
'securitygrouprules',
'securitygroups',
'subnetpools',
'subnets',
'trunks',
'auto_allocated_topologies',
'default_security_group',
'ha_router_networks',
'quotausages',
'vips',
'members',
'pools',
'healthmonitors',
'lbaas_members',
'lbaas_healthmonitors',
'lbaas_loadbalancers',
'lbaas_pools',
'lbaas_l7rules',
'lbaas_l7policies',
'lbaas_listeners',
]
return tables
def get_columns(table):
"""Returns list of columns for given table."""
inspector = get_inspector()
return inspector.get_columns(table)
def get_data():
"""Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists.
"""
output = []
tables = get_tables()
for table in tables:
try:
columns = get_columns(table)
except sa.exc.NoSuchTableError:
continue
for column in columns:
if column['name'] == 'tenant_id':
output.append((table, column))
return output
def alter_column(table, column):
old_name = 'tenant_id'
new_name = 'project_id'
coltype = sa.String(attr.TENANT_ID_MAX_LEN)
op.alter_column(
table_name=table,
column_name=old_name,
new_column_name=new_name,
type_=coltype,
existing_nullable=column['nullable']
)
def recreate_index(index, table_name):
old_name = index['name']
new_name = old_name.replace('tenant', 'project')
op.drop_index(op.f(old_name), table_name)
op.create_index(new_name, table_name, ['project_id'])
def upgrade():
data = get_data()
for table, column in data:
alter_column(table, column)
op.drop_index(op.f('ix_quark_networks_tenant_id'),
table_name='quark_networks')
op.drop_index(op.f('ix_quark_networks_tenant_id_name'),
table_name='quark_networks')
op.drop_index(op.f('ix_quark_subnets_tenant_id'),
table_name='quark_subnets')
op.drop_index(op.f('ix_quark_subnets_network_id_tenant_id'),
table_name='quark_subnets')
op.drop_index(op.f('ix_quark_ports_tenant_id'),
table_name='quark_ports')
op.drop_index(op.f('ix_quark_ports_network_id_tenant_id'),
table_name='quark_ports')
op.drop_index(op.f('ix_quark_ports_name_tenant_id'),
table_name='quark_ports')
op.drop_index(op.f('ix_quotas_tenant_id'),
table_name='quotas')
op.create_index(op.f('ix_quark_networks_project_id'),
'quark_networks',
['project_id'],
unique=False)
op.create_index(op.f('ix_quark_networks_project_id_name'),
'quark_networks',
['project_id', 'name'],
unique=False)
op.create_index(op.f('ix_quark_subnets_project_id'),
'quark_subnets',
['project_id'],
unique=False)
op.create_index(op.f('ix_quark_subnets_network_id_project_id'),
'quark_subnets',
['network_id', 'project_id'],
unique=False)
op.create_index(op.f('ix_quark_ports_project_id'),
'quark_ports',
['project_id'],
unique=False)
op.create_index(op.f('ix_quark_ports_network_id_project_id'),
'quark_ports',
['network_id', 'project_id'],
unique=False)
op.create_index(op.f('ix_quark_ports_name_project_id'),
'quark_ports',
['name', 'project_id'],
unique=False)
op.create_index(op.f('ix_quotas_project_id'),
'quotas',
['project_id'],
unique=False)
def downgrade():
op.drop_index(op.f('ix_quark_networks_project_id'),
table_name='quark_networks')
op.drop_index(op.f('ix_quark_networks_project_id_name'),
table_name='quark_networks')
op.drop_index(op.f('ix_quark_subnets_project_id'),
table_name='quark_subnets')
op.drop_index(op.f('ix_quark_subnets_network_id_project_id'),
table_name='quark_subnets')
    op.drop_index(op.f('ix_quark_ports_project_id'),
                  table_name='quark_ports')
op.drop_index(op.f('ix_quark_ports_network_id_project_id'),
table_name='quark_ports')
op.drop_index(op.f('ix_quark_ports_name_project_id'),
table_name='quark_ports')
op.drop_index(op.f('ix_quotas_project_id'),
table_name='quotas')
| apache-2.0 | -3,824,265,833,923,501,600 | 27.403756 | 69 | 0.546612 | false |
pykiki/PyKI | tests/test_check_key_cert.py | 1 | 2012 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from OpenSSL import crypto, SSL
from os import path
'''
PyKI - PKI openssl for managing TLS certificates
Copyright (C) 2016 MAIBACH ALAIN
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact: [email protected] / 34 rue appienne 13480 - FRANCE.
'''
certPath = "/Users/albookpro/Downloads/pyTLSpki/building/pki/CERTS/clients/newcsr/newcsr.crt"
keyPath = "/Users/albookpro/Downloads/pyTLSpki/building/pki/CERTS/clients/newcsr/newcsr.key"
def check_cer_vs_key(cert, key, keypass=False):
if not path.exists(cert):
print("Error, unable to find " + cert + "\n")
exit(1)
elif not path.exists(key):
print("Error, unable to find " + key + "\n")
exit(1)
if not keypass:
keyObj = crypto.load_privatekey(crypto.FILETYPE_PEM, open(key).read())
else:
keyObj = crypto.load_privatekey(
crypto.FILETYPE_PEM, open(key).read(), keypass)
certObj = crypto.load_certificate(crypto.FILETYPE_PEM, open(cert).read())
ctx = SSL.Context(SSL.TLSv1_METHOD)
ctx.use_privatekey(keyObj)
ctx.use_certificate(certObj)
try:
ctx.check_privatekey()
except SSL.Error:
print("Incorrect key.\n")
else:
print("Key matches certificate.\n")
# interactive mode
#check_cer_vs_key(certPath, keyPath)
check_cer_vs_key(certPath, keyPath, b'azerty')
| gpl-3.0 | -7,313,299,838,045,761,000 | 32.533333 | 93 | 0.687873 | false |