# ==== guillaume-philippon/aquilon :: lib/aquilon/worker/processes.py (apache-2.0) ====
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handling of external processes for the broker happens here.
Most methods will be called as part of a callback chain, and should
expect to handle a generic result from whatever happened earlier in
the chain.
"""
import os
import re
import logging
from contextlib import contextmanager
from subprocess import Popen, PIPE
from tempfile import mkdtemp
from threading import Thread
from six import iteritems
from mako.lookup import TemplateLookup
from twisted.python import context
from twisted.python.log import callWithContext, ILogContext
from aquilon.exceptions_ import (ProcessException, AquilonError, ArgumentError,
InternalError)
from aquilon.config import Config, running_from_source
from aquilon.aqdb.model import Machine
from aquilon.utils import remove_dir
LOGGER = logging.getLogger(__name__)
class StreamLoggerThread(Thread):
"""Helper class for streaming output as it becomes available."""
def __init__(self, logger, loglevel, process, stream, filterre=None,
context=None):
self.logger = logger
self.loglevel = loglevel
self.process = process
self.stream = stream
self.filterre = filterre
self.context = context
self.buffer = []
Thread.__init__(self)
def run(self):
while True:
data = self.stream.readline()
if data == '' and (self.stream.closed or
self.process.returncode is not None):
break
if data != '':
if self.filterre and not self.filterre.search(data):
continue
self.buffer.append(data)
if self.context:
callWithContext(self.context, self.logger.log,
self.loglevel, data.rstrip())
else:
self.logger.log(self.loglevel, data.rstrip())
def run_command(args, env=None, path="/", logger=LOGGER, loglevel=logging.INFO,
stream_level=None, filterre=None, input=None):
'''Run the specified command (args should be a list corresponding to ARGV).
Returns any output (stdout only). If the command fails, then
ProcessException will be raised. To pass the output back to the client
pass in a logger and specify loglevel as CLIENT_INFO.
To reduce the captured output, pass in a compiled regular expression
with the filterre keyword argument. Any output lines on stdout will
only be kept if filterre.search() finds a match.
'''
if env:
shell_env = env.copy()
else:
shell_env = {}
# Make sure that environment is properly kerberized.
for envname, envvalue in os.environ.items():
# AQTEST<something> is used by the testsuite
if envname.startswith("KRB") or envname.startswith("AQTEST"):
shell_env[envname] = envvalue
# Add a default value for the PATH.
for envname in ["PATH"]:
if envname not in shell_env and envname in os.environ:
shell_env[envname] = os.environ[envname]
# Force any arguments to be strings... takes care of unicode from
# the database.
command_args = [str(arg) for arg in args]
# If the command was not given with an absolute path, then check if there's
# an override specified in the config file. If not, we'll rely on $PATH.
if command_args[0][0] != "/":
config = Config()
command_args[0] = config.lookup_tool(command_args[0])
simple_command = " ".join(command_args)
logger.log(loglevel, "run_command: %s (CWD: %s)", simple_command,
os.path.abspath(path))
if input:
proc_stdin = PIPE
logger.info("command `%s` stdin: %s", simple_command, input)
else:
proc_stdin = None
# The context contains the log prefix
ctx = (context.get(ILogContext) or {}).copy()
p = Popen(args=command_args, stdin=proc_stdin, stdout=PIPE, stderr=PIPE,
cwd=path, env=shell_env)
# If we want to stream the command's output back to the client while the
    # command is still executing, then we have to do it ourselves. Otherwise,
# p.communicate() does everything.
if stream_level is None:
out, err = p.communicate(input=input)
if filterre:
out = "\n".join(line for line in out.splitlines()
if filterre.search(line))
else:
out_thread = StreamLoggerThread(logger, stream_level, p, p.stdout,
filterre=filterre, context=ctx)
err_thread = StreamLoggerThread(logger, stream_level, p, p.stderr, context=ctx)
out_thread.start()
err_thread.start()
if proc_stdin:
p.stdin.write(input)
p.stdin.close()
p.wait()
out_thread.join()
err_thread.join()
out = "".join(out_thread.buffer)
err = "".join(err_thread.buffer)
if p.returncode >= 0:
logger.log(loglevel, "command `%s` exited with return code %d",
simple_command, p.returncode)
retcode = p.returncode
signal_num = None
else: # pragma: no cover
logger.log(loglevel, "command `%s` exited with signal %d",
simple_command, -p.returncode)
retcode = None
signal_num = -p.returncode
if err:
logger.log(loglevel, "command `%s` stderr: %s", simple_command, err)
if p.returncode != 0:
raise ProcessException(command=simple_command, out=out, err=err,
code=retcode, signalNum=signal_num,
filtered=bool(filterre))
return out
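# Illustrative usage sketch, not part of the original module: run_command()
# takes an argv-style list, and filterre trims the captured stdout. The
# command, working directory and pattern below are assumptions chosen only to
# show the calling convention.
def _example_run_command():
    heads_re = re.compile(r"refs/heads")
    # Returns only the stdout lines that mention local branch refs.
    return run_command(["git", "show-ref"], path="/tmp", filterre=heads_re)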
def run_git(args, env=None, path=".", logger=LOGGER, loglevel=logging.INFO,
filterre=None, stream_level=None):
config = Config()
if env:
git_env = env.copy()
else:
git_env = {}
git_env["PATH"] = git_env.get("PATH", os.environ.get("PATH", ""))
for name in ["git_author_name", "git_author_email",
"git_committer_name", "git_committer_email"]:
if not config.has_option("broker", name):
continue
value = config.get("broker", name)
git_env[name.upper()] = value
if isinstance(args, list):
git_args = args[:]
if git_args[0] != "git":
git_args.insert(0, "git")
else:
git_args = ["git", args]
return run_command(git_args, env=git_env, path=path, logger=logger,
loglevel=loglevel, filterre=filterre,
stream_level=stream_level)
def cache_version(config, logger=LOGGER):
"""Try to determine the broker version by examining the path
to this source file. If this file path matches
/aquilon/PROJ/aqd/<version>/ (likely /ms/dist) or
/aquilon/aqd/<version>/ (likely /ms/dev) then use <version>.
Otherwise, run git describe to get the most recent tag.
"""
if config.has_option("broker", "version"):
return
version_re = re.compile(r'/aquilon(?:/PROJ)?/aqd/([^/]+)/')
m = version_re.search(__file__)
if m and m.group(1) != "lib" and m.group(1) != "bin":
config.set("broker", "version", m.group(1))
return
try:
out = run_git("describe", logger=logger,
path=config.get("broker", "srcdir"))
config.set("broker", "version", out.strip())
except ProcessException as e:
logger.info("Could not run git describe to get version: %s", e)
config.set("broker", "version", "Unknown")
class GitRepo(object):
"""
Git repository wrapper
This class is not meant to be a simple wrapper around git, but rather to
implement higher level functions - even if some of those functions can be
translated to a single git command.
"""
def __init__(self, path, logger, loglevel=logging.INFO):
self.path = path
self.logger = logger
self.loglevel = loglevel
@staticmethod
def template_king(logger, loglevel=logging.INFO):
"""
Constructor for template-king
"""
config = Config()
return GitRepo(config.get("broker", "kingdir"), logger=logger,
loglevel=loglevel)
@staticmethod
def domain(domain, logger, loglevel=logging.INFO):
"""
Constructor for domains
"""
config = Config()
domainsdir = config.get('broker', 'domainsdir')
return GitRepo(os.path.join(domainsdir, domain), logger=logger,
loglevel=loglevel)
def run(self, args, filterre=None, stream_level=None):
return run_git(args, path=self.path, logger=self.logger,
loglevel=self.loglevel, filterre=filterre,
stream_level=stream_level)
def ref_contains_commit(self, commit_id, ref='HEAD'):
"""
Check if a given reference (by default, HEAD) contains a given commit ID
"""
filterre = re.compile('^' + commit_id + '$')
try:
found = self.run(['rev-list', ref], filterre=filterre)
except ProcessException as pe:
if pe.code != 128:
raise
else:
found = None
return found
def ref_commit(self, ref='HEAD', compel=True):
"""
Return the top commit of a ref, by default HEAD
"""
try:
commit = self.run(['rev-parse', '--verify', '-q', ref + '^{commit}'])
return commit.strip()
except ProcessException as pe:
if pe.code == 1:
if compel:
raise ArgumentError("Ref %s could not be translated to an "
"existing commit ID." % ref)
return None
raise
def ref_tree(self, ref='HEAD', compel=True):
"""
Return the tree ID a ref (by default, HEAD) points to
"""
try:
tree = self.run(['rev-parse', '--verify', '-q', ref + '^{tree}'])
return tree.strip()
except ProcessException as pe:
if pe.code == 1:
if compel:
raise ArgumentError("Ref %s not found.", ref)
return None
raise
@contextmanager
def temp_clone(self, branch):
"""
Create a temporary clone for working on the given branch
This function is a context manager meant to be used in a with statement.
The temporary clone is removed automatically.
"""
config = Config()
# TODO: is rundir suitable for this purpose?
rundir = config.get("broker", "rundir")
tempdir = mkdtemp(prefix="git_clone_", dir=rundir)
try:
run_git(["clone", "--shared", "--branch", branch, "--",
self.path, branch],
path=tempdir, logger=self.logger, loglevel=self.loglevel)
yield GitRepo(os.path.join(tempdir, branch), logger=self.logger,
loglevel=self.loglevel)
finally:
remove_dir(tempdir, logger=self.logger)
def push_origin(self, ref, force=False):
"""
Push a ref to the origin remote
"""
if force:
self.run(["push", "--force", "origin", ref])
else:
self.run(["push", "origin", ref])
IP_NOT_DEFINED_RE = re.compile(r"Host with IP address "
r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
r" is not defined")
BUILDING_NOT_FOUND = re.compile(r"bldg [a-zA-Z0-9]{2} doesn't exists")
CAMPUS_NOT_FOUND = re.compile(r"campus [a-zA-Z0-9]{2} doesn't exist")
DNS_DOMAIN_NOT_FOUND = re.compile(r"DNS domain ([-\w\.\d]+) doesn't exists")
DNS_DOMAIN_EXISTS = re.compile(r"DNS domain [-\w\.\d]+ already defined")
# The regexp is taken from DSDB
INVALID_NAME_RE = re.compile(r"[^A-Za-z0-9_.-]")
class DSDBRunner(object):
def __init__(self, logger=LOGGER):
config = Config()
self.logger = logger
self.dsdb_use_testdb = config.getboolean("broker", "dsdb_use_testdb")
self.location_sync = config.getboolean("broker", "dsdb_location_sync")
self.actions = []
self.rollback_list = []
def normalize_iface(self, iface):
return INVALID_NAME_RE.sub("_", iface)
def commit(self, verbose=False):
for args, rollback, error_filter, ignore_msg in self.actions:
cmd = ["dsdb"]
cmd.extend(args)
try:
if verbose:
self.logger.client_info("DSDB: %s" %
" ".join(str(a) for a in args))
run_command(cmd, env=self.getenv(), logger=self.logger)
except ProcessException as err:
if error_filter and err.out and error_filter.search(err.out):
self.logger.warning(ignore_msg)
else:
raise
if rollback:
self.rollback_list.append(rollback)
def rollback(self, verbose=False):
self.rollback_list.reverse()
rollback_failures = []
for args in self.rollback_list:
cmd = ["dsdb"]
cmd.extend(args)
try:
self.logger.client_info("DSDB: %s" %
" ".join(str(a) for a in args))
run_command(cmd, env=self.getenv(), logger=self.logger)
except ProcessException as err:
rollback_failures.append(str(err))
did_something = bool(self.rollback_list)
del self.rollback_list[:]
if rollback_failures:
raise AquilonError("DSDB rollback failed, DSDB state is "
"inconsistent: " + "\n".join(rollback_failures))
elif did_something:
self.logger.client_info("DSDB rollback completed.")
def commit_or_rollback(self, error_msg=None, verbose=False):
try:
self.commit(verbose=verbose)
except ProcessException as err:
if not error_msg:
error_msg = "DSDB update failed"
self.logger.warning(str(err))
self.rollback(verbose=verbose)
raise ArgumentError(error_msg)
def add_action(self, command_args, rollback_args, error_filter=None,
ignore_msg=False):
"""
        Register an action to execute and its rollback counterpart.
command_args: the DSDB command to execute
rollback_args: the DSDB command to execute on rollback
error_filter: regexp of error messages in the output of dsdb that
should be ignored
ignore_msg: message to log if the error_filter matched
"""
if error_filter and not ignore_msg:
raise InternalError("Specifying an error filter needs the message "
"specified as well.")
self.actions.append((command_args, rollback_args, error_filter,
ignore_msg))
def getenv(self):
if self.dsdb_use_testdb:
return {"DSDB_USE_TESTDB": "true"}
return None
def add_campus(self, campus, comments):
if not self.location_sync:
return
command = ["add_campus_aq", "-campus_name", campus]
if comments:
command.extend(["-comments", comments])
rollback = ["delete_campus_aq", "-campus", campus]
self.add_action(command, rollback)
def del_campus(self, campus):
if not self.location_sync:
return
command = ["delete_campus_aq", "-campus", campus]
rollback = ["add_campus_aq", "-campus_name", campus]
        self.add_action(command, rollback, CAMPUS_NOT_FOUND,
                        "DSDB does not have campus %s defined, "
                        "proceeding." % campus)
def add_city(self, city, country, fullname):
if not self.location_sync:
return
command = ["add_city_aq", "-city_symbol", city, "-country_symbol",
country, "-city_name", fullname]
rollback = ["delete_city_aq", "-city", city]
self.add_action(command, rollback)
def update_city(self, city, campus, prev_campus):
if not self.location_sync:
return
command = ["update_city_aq", "-city", city, "-campus", campus]
# We can't revert to an empty campus
if prev_campus:
rollback = ["update_city_aq", "-city", city, "-campus", prev_campus]
else:
rollback = None
self.add_action(command, rollback)
def del_city(self, city, old_country, old_fullname):
if not self.location_sync:
return
command = ["delete_city_aq", "-city", city]
rollback = ["add_city_aq", "-city_symbol", city, "-country_symbol",
old_country, "-city_name", old_fullname]
self.add_action(command, rollback)
def add_campus_building(self, campus, building):
if not self.location_sync:
return
command = ["add_campus_building_aq", "-campus_name", campus,
"-building_name", building]
rollback = ["delete_campus_building_aq", "-campus_name", campus,
"-building_name", building]
self.add_action(command, rollback)
def add_building(self, building, city, building_addr):
if not self.location_sync:
return
command = ["add_building_aq", "-building_name", building, "-city", city,
"-building_addr", building_addr]
rollback = ["delete_building_aq", "-building", building]
self.add_action(command, rollback)
def del_campus_building(self, campus, building):
if not self.location_sync:
return
command = ["delete_campus_building_aq", "-campus_name", campus,
"-building_name", building]
rollback = ["add_campus_building_aq", "-campus_name", campus,
"-building_name", building]
self.add_action(command, rollback)
def del_building(self, building, old_city, old_addr):
if not self.location_sync:
return
command = ["delete_building_aq", "-building", building]
rollback = ["add_building_aq", "-building_name", building,
"-city", old_city, "-building_addr", old_addr]
self.add_action(command, rollback, BUILDING_NOT_FOUND,
"DSDB does not have building %s defined, "
"proceeding." % building)
def update_building(self, building, address, old_addr):
if not self.location_sync:
return
command = ["update_building_aq", "-building_name", building,
"-building_addr", address]
rollback = ["update_building_aq", "-building_name", building,
"-building_addr", old_addr]
self.add_action(command, rollback)
def add_host_details(self, fqdn, ip, iface=None, mac=None, primary=None,
comments=None, **_):
command = ["add_host", "-host_name", fqdn,
"-ip_address", ip, "-status", "aq"]
if iface:
command.extend(["-interface_name", self.normalize_iface(iface)])
if mac:
command.extend(["-ethernet_address", mac])
if primary and str(primary) != str(fqdn):
command.extend(["-primary_host_name", primary])
if comments:
command.extend(["-comments", comments])
rollback = ["delete_host", "-ip_address", ip]
self.add_action(command, rollback)
def update_host_details(self, fqdn, iface=None, new_ip=None, new_mac=None,
new_comments=None, old_ip=None, old_mac=None,
old_comments=None, **_):
command = ["update_aqd_host", "-host_name", fqdn]
if iface:
iface = self.normalize_iface(iface)
command.extend(["-interface_name", iface])
rollback = command[:]
if new_ip and new_ip != old_ip:
command.extend(["-ip_address", new_ip])
rollback.extend(["-ip_address", old_ip])
if new_mac and new_mac != old_mac:
command.extend(["-ethernet_address", new_mac])
rollback.extend(["-ethernet_address", old_mac])
if new_comments != old_comments:
command.extend(["-comments", new_comments or ""])
rollback.extend(["-comments", old_comments or ""])
self.add_action(command, rollback)
def update_host_iface_name(self, old_fqdn, new_fqdn,
old_iface, new_iface, **_):
old_iface = self.normalize_iface(old_iface)
new_iface = self.normalize_iface(new_iface)
command = ["update_aqd_host", "-host_name", old_fqdn]
rollback = ["update_aqd_host", "-host_name", new_fqdn]
if old_fqdn != new_fqdn:
command.extend(["-new_host_name", new_fqdn])
rollback.extend(["-new_host_name", old_fqdn])
if old_iface and old_iface != new_iface:
command.extend(["-interface_name", old_iface,
"-new_interface_name", new_iface])
rollback.extend(["-interface_name", new_iface,
"-new_interface_name", old_iface])
self.add_action(command, rollback)
def delete_host_details(self, fqdn, ip, iface=None, mac=None, primary=None,
comments=None, **_):
command = ["delete_host", "-ip_address", ip]
rollback = ["add_host", "-host_name", fqdn,
"-ip_address", ip, "-status", "aq"]
if iface:
rollback.extend(["-interface_name", self.normalize_iface(iface)])
if mac:
rollback.extend(["-ethernet_address", mac])
if primary and str(primary) != str(fqdn):
rollback.extend(["-primary_host_name", primary])
if comments:
rollback.extend(["-comments", comments])
self.add_action(command, rollback, IP_NOT_DEFINED_RE,
"DSDB did not have a host with this IP address, "
"proceeding.")
@classmethod
def snapshot_hw(cls, dbhw_ent):
"""
Make a snapshot of the interface parameters.
update_host() will use this snapshot to decide what has changed and
what DSDB commands have to be executed.
Comment handling is a bit complicated, because we have more ways to
store comments in Aquilon than in DSDB. The rules are:
- If the interface has a comment, use that.
- Otherwise take the comment from the hardware entity.
Exception: management interfaces
"""
real_primary = dbhw_ent.fqdn
hwdata = {"by-ip": {},
"by-fqdn": {},
"primary": real_primary}
# For each of the addresses held by this hardware_entity we need to
# create an entry in DSDB. The following loop makes a snapshot of
# expected state of the information in DSDB.
for addr in dbhw_ent.all_addresses():
            # Do not propagate to DSDB if the network is not internal,
            # there are no FQDNs associated with this address, or
            # the address is shared with other devices.
if not addr.network.is_internal:
continue
if not addr.fqdns:
continue
if addr.is_shared:
continue
            # In AQDB there may be multiple domain names associated with
            # an address; in DSDB there can only be one. Thus we pick
            # the first address to propagate.
dns_record = addr.dns_records[0]
# By default we take the comments from the hardware_entity,
# if an interface comment exists then this will be taken
# in preference. Management interfaces are added as stand-alone
# entries, therefore we do not take the hardware_entity comment
# but allow the following code to take it from the interface.
if addr.interface.interface_type != 'management':
comments = dbhw_ent.comments
else:
comments = None
iface = addr.logical_name
if addr.interface.comments and not \
addr.interface.comments.startswith("Created automatically"):
comments = addr.interface.comments
# Determine if we need to specify a primary name to DSDB. By
# doing so we are associating this record with another.
# Note, the existence of a primary hostname affects the order
            # that entries are processed in update_host()
if addr.interface.interface_type == "management":
# Do not use -primary_host_name for the management address
# as we do not wish to associate them with the host currently
# on the machine (which may change).
primary = None
elif str(dns_record.fqdn) == real_primary:
# Avoid circular dependency - do not set the 'primary' key for
# the real primary name
primary = None
elif not isinstance(dbhw_ent, Machine):
# Not a machine - we don't care about srvloc
primary = real_primary
elif dns_record.reverse_ptr and str(dns_record.reverse_ptr.fqdn) == real_primary:
# If the reverse PTR record points to the primary name in AQDB,
# then pass the -primary_name flag to DSDB
primary = real_primary
else:
# Avoid using -primary_name, to please srvloc
primary = None
# Exclude the MAC address for aliases
if addr.label:
mac = None
else:
mac = addr.interface.mac
ifdata = {'iface': iface,
'ip': addr.ip,
'mac': mac,
'fqdn': str(dns_record.fqdn),
'primary': primary,
'comments': comments}
hwdata["by-ip"][ifdata["ip"]] = ifdata
hwdata["by-fqdn"][ifdata["fqdn"]] = ifdata
# The primary address of Zebra hosts needs extra care. Here, we cheat a
# bit - we do not check if the primary name is a service address, but
# instead check if it has an IP address and it was not handled above.
if dbhw_ent.primary_ip and \
str(dbhw_ent.primary_name.fqdn) not in hwdata["by-fqdn"]:
ifdata = {'iface': "vip",
'ip': dbhw_ent.primary_ip,
'mac': None,
'fqdn': str(dbhw_ent.primary_name),
'primary': None,
'comments': None}
hwdata["by-ip"][ifdata["ip"]] = ifdata
hwdata["by-fqdn"][ifdata["fqdn"]] = ifdata
return hwdata
def update_host(self, dbhw_ent, old_hwdata):
"""Update a dsdb host entry.
The calling code (the aq update_interface command) treats the
        hostname and interface name (except for zebra hosts!) as unchanging.
There is an update_host dsdb command that lets the mac address,
ip address (and comments, if we kept them) change.
Any other changes have to be done by removing the old DSDB
entry and adding a new one.
Please note that in case of zebra interfaces adding a new ip address
to the same interface may result in adding/removing DSDB entries.
"""
if dbhw_ent:
new_hwdata = self.snapshot_hw(dbhw_ent)
else:
new_hwdata = {"by-ip": {},
"by-fqdn": {},
"primary": None}
if not old_hwdata:
old_hwdata = {"by-ip": {},
"by-fqdn": {},
"primary": None}
deletes = []
adds = []
# Host/interface names cannot be updated simultaneously with IP/MAC
# addresses or comments
addr_updates = []
name_updates = []
# Run through all of the entries in the old snapshot and attempt
        # to match them to their corresponding new entry.
for fqdn, old_ifdata in old_hwdata["by-fqdn"].items():
# Locate the new information about this address by either
# its FQDN or IP address.
if fqdn in new_hwdata["by-fqdn"]:
new_ifdata = new_hwdata["by-fqdn"][fqdn]
elif old_ifdata["ip"] in new_hwdata["by-ip"]:
new_ifdata = new_hwdata["by-ip"][old_ifdata["ip"]]
else:
new_ifdata = None
# If either the old or the new entry is bound to a primary name but
# the other is not, then we have to delete & re-add it. Note this
# will be re-added in the following loop as we did not delete the
# entry from new_hwdata.
if new_ifdata and bool(old_ifdata["primary"]) != bool(new_ifdata["primary"]):
new_ifdata = None
# If there is no new data then record a delete (note above).
if not new_ifdata:
deletes.append(old_ifdata)
continue
            # Create a dict with entries in old_ifdata prefixed with 'old_'
# and entries in new_ifdata prefixed with 'new_'
kwargs = {p + k: v
for (p, d) in [('old_', old_ifdata),
('new_', new_ifdata)]
for k, v in iteritems(d)}
if old_ifdata['ip'] != new_ifdata['ip'] or \
old_ifdata['mac'] != new_ifdata['mac'] or \
old_ifdata['comments'] != new_ifdata['comments']:
addr_updates.append(kwargs)
if old_ifdata['fqdn'] != new_ifdata['fqdn'] or \
old_ifdata['iface'] != new_ifdata['iface']:
name_updates.append(kwargs)
# Delete the entries from new_hwdata. We have recorded an
# update. The contents of new_hwdata is used in the following
# loop to record additions.
del new_hwdata["by-fqdn"][new_ifdata["fqdn"]]
del new_hwdata["by-ip"][new_ifdata["ip"]]
        # For all of the records remaining in new_hwdata (see above)
        # record an addition operation.
adds = new_hwdata["by-fqdn"].values()
# Add the primary address first, and delete it last. The primary address
# is identified by having an empty ['primary'] key (this is true for the
# management address as well, but that does not matter).
sort_by_primary = lambda x: x['primary'] or ""
adds.sort(key=sort_by_primary)
deletes.sort(key=sort_by_primary, reverse=True)
for attrs in deletes:
self.delete_host_details(**attrs)
for kwargs in addr_updates:
# The old FQDN and interface name are the fixed point
self.update_host_details(fqdn=kwargs['old_fqdn'],
iface=kwargs['old_iface'],
**kwargs)
for kwargs in name_updates:
self.update_host_iface_name(**kwargs)
for attrs in adds:
self.add_host_details(**attrs)
def add_dns_domain(self, dns_domain, comments):
if not comments:
# DSDB requires the comments field, even if it is empty
comments = ""
command = ["add_dns_domain", "-domain_name", dns_domain,
"-comments", comments]
rollback = ["delete_dns_domain", "-domain_name", dns_domain]
self.add_action(command, rollback, DNS_DOMAIN_EXISTS,
"The DNS domain %s already exists in DSDB, "
"proceeding." % dns_domain)
def delete_dns_domain(self, dns_domain, old_comments):
command = ["delete_dns_domain", "-domain_name", dns_domain]
rollback = ["add_dns_domain", "-domain_name", dns_domain,
"-comments", old_comments]
self.add_action(command, rollback, DNS_DOMAIN_NOT_FOUND,
"The DNS domain %s does not exist in DSDB, "
"proceeding." % dns_domain)
rack_row_re = re.compile(r'^\s*Row:\s*\b([-\w]+)\b$', re.M)
rack_col_re = re.compile(r'^\s*Column:\s*\b([-\w]+)\b$', re.M)
def show_rack(self, rackname):
out = run_command(["dsdb", "show_rack", "-rack_name", rackname],
env=self.getenv())
rack_row = self.rack_row_re.search(out)
rack_col = self.rack_col_re.search(out)
fields = {}
fields["rack_row"] = rack_row and rack_row.group(1) or None
fields["rack_col"] = rack_col and rack_col.group(1) or None
if not fields["rack_row"] or not fields["rack_col"]:
raise ValueError("Rack %s is missing row and/or col data")
return fields
primary_re = re.compile(r'^\s*Primary Name:\s*\b([-\w]+)\b$', re.M)
node_re = re.compile(r'^\s*Node:\s*\b([-\w]+)\b$', re.M)
dns_re = re.compile(r'^\s*DNS Domain:\s*\b([-\w\.]+)\b$', re.M)
state_re = re.compile(r'^\s*State:\s*\b(\d+)\b$', re.M)
def show_host(self, hostname):
(short, dot, dns_domain) = hostname.partition(".")
fields = {}
if not dot:
fields["fqdn"] = short + ".ms.com"
fields["dsdb_lookup"] = short
elif not dns_domain:
fields["fqdn"] = short + "ms.com"
fields["dsdb_lookup"] = short
elif dns_domain != "ms.com":
fields["fqdn"] = hostname
fields["dsdb_lookup"] = hostname
else:
fields["fqdn"] = hostname
fields["dsdb_lookup"] = short
out = run_command(["dsdb", "show_host",
"-host_name", fields["dsdb_lookup"]],
env=self.getenv())
primary = self.primary_re.search(out)
node = self.node_re.search(out)
dns = self.dns_re.search(out)
state = self.state_re.search(out)
fields["primary_name"] = primary and primary.group(1) or None
fields["node"] = node and node.group(1) or None
fields["dns"] = dns and dns.group(1) or None
if state:
fields["state"] = int(state.group(1))
else:
fields["state"] = None
return fields
def add_alias(self, alias, target, comments):
command = ["add_host_alias", "-host_name", target,
"-alias_name", alias]
if comments:
command.extend(["-comments", comments])
rollback = ["delete_host_alias", "-alias_name", alias]
self.add_action(command, rollback)
def del_alias(self, alias, old_target, old_comments):
command = ["delete_host_alias", "-alias_name", alias]
rollback = ["add_host_alias", "-host_name", old_target,
"-alias_name", alias]
if old_comments:
rollback.extend(["-comments", old_comments])
self.add_action(command, rollback)
def update_alias(self, alias, target, comments, old_target, old_comments):
command = ["update_host_alias", "-alias", alias,
"-new_host", target]
rollback = ["update_host_alias", "-alias", alias,
"-new_host", old_target]
if comments != old_comments:
command.extend(["-new_comments", comments or ""])
rollback.extend(["-new_comments", old_comments or ""])
self.add_action(command, rollback)
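# Illustrative usage sketch, not part of the original module: every DSDB
# change is queued by add_action() together with its inverse, so commit() can
# replay the batch and rollback() can undo it. The campus name is an
# assumption, and a configured broker (Config) is assumed to be available.
def _example_dsdb_batch():
    runner = DSDBRunner()
    runner.add_action(["add_campus_aq", "-campus_name", "ny"],
                      ["delete_campus_aq", "-campus", "ny"])
    runner.commit_or_rollback("adding campus ny failed")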
def build_mako_lookup(config, kind, **kwargs):
# This duplicates the logic from lookup_file_path(), but we don't want to
# move the mako dependency to aquilon.config
srcdir = config.get("broker", "srcdir")
srcpath = os.path.join(srcdir, "etc", "mako", kind)
directories = []
if running_from_source():
# If we're running from the source, then ignore any installed files
directories.append(srcpath)
else:
directories.append(os.path.join("/etc", "aquilon", "mako", kind))
directories.append(os.path.join("/usr", "share", "aquilon", "mako", kind))
if os.path.exists(srcpath):
directories.append(srcpath)
return TemplateLookup(directories=directories, **kwargs)
# ==== kofkings/RSA_python :: RSA.py (gpl-3.0) ====
import random
from fractions import gcd
def RSA(plainText):
# Generate Key
    # p and q are two random large primes (512 bits each)
p = generateLargePrime(512)
q = generateLargePrime(512)
while p == q:
q = generateLargePrime(512)
n = p * q
totientN = (p - 1) * (q - 1)
# PublicKey = random in (2, totientN - 1) | gcd(PublicKey, totientN) = 1
# PrivateKey * PublicKey = 1 (% totientN)
publicKey = random.randrange(2, totientN - 1)
while not (gcd(publicKey, totientN) == 1):
publicKey = random.randrange(2, totientN - 1)
privateKey = bezout(publicKey, totientN)
while privateKey < 0:
privateKey += totientN
print "Key:"
print " N:", n
print " Public Key:", publicKey
print " Private Key", privateKey
# Encrypt CipherTextNumber = PlainTextNumber ^ PublicKey % N
    # Divide into blocks so that each plainTextNumber < N
blockSize = n.bit_length() / 8
print 'Plain Text :', plainText
# Get PlainTextNumber
arrayPlainTextNumber = []
for i in xrange(len(plainText) / blockSize + 1):
arrayPlainTextNumber.append(textToNum(plainText[:blockSize]))
plainText = plainText[blockSize:]
print "Plain Text Number : ", arrayPlainTextNumber
arrayCipherTextNumber = []
for i in arrayPlainTextNumber:
arrayCipherTextNumber.append(pow(i, publicKey, n))
print "Cipher Text Number : ", arrayCipherTextNumber
# Decrypt DecryptTextNumber = CipherTextNumber ^ PrivateKey % N
decryptedText = ''
arrayDecryptedTextNumber = []
for i in arrayCipherTextNumber:
decryptedNumber = pow(i, privateKey, n)
arrayDecryptedTextNumber.append(decryptedNumber)
decryptedText += numToText(decryptedNumber)
print "Decrypted Text Number :", arrayDecryptedTextNumber
print "Decrypted Text :", decryptedText
def isPrime(n):
    # Probabilistic primality test: True means n is probably prime
    # Trial division by low primes first, for a quicker test
lowPrimes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89,
97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181,
191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397,
401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619,
631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]
if (n > 3):
# Test base 2
if (n & 1 != 0):
# Test base low prime
for p in lowPrimes:
if (n % p == 0):
return False
            # Miller-Rabin
# n - 1 = 2 ^ s * m
m, s = n - 1, 0
while m & 1 == 0:
m, s = m >> 1, s + 1
            # Loop k times (error bound 4 ^ -k)
for i in xrange(50):
a = random.randrange(2, n - 2)
if not strong_pseudoprime(n, a, s, m):
return False
return True
return False
def strong_pseudoprime(n, a, s, m):
    # An odd n with n - 1 = m * 2 ^ s is a strong probable prime to base a
    # when a ^ m % n == 1,
    # or a ^ (m * 2 ^ r) % n == n - 1 for some 0 <= r < s
b = pow(a, m, n)
if b == 1:
return True
for i in xrange(s):
if b == n - 1:
return True
b = b * b % n
return False
def generateLargePrime(k):
# Random number then check primality
n = random.randrange(2 ** (k - 1), 2 ** (k))
while not isPrime(n):
n = random.randrange(2 ** (k - 1), 2 ** (k))
return n
def bezout(a, b):
    # Extended Euclid (Bezout): given a and b, find x, y such that
    # x * a + y * b = gcd(a, b)
# In RSA:
# a <=> PublicKey, b <=> totientN, x <=> PrivateKey
x1, y1 = 1, 0
x2, y2 = 0, 1
while b:
temp = a // b
a, b = b, a % b
x1, y1, x2, y2 = x2, y2, x1 - temp * x2, y1 - temp * y2
return x1
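# Illustrative sketch, not part of the original script: a worked Bezout
# identity. For a=7, b=40 the returned coefficient, reduced mod b, is the
# modular inverse 23, since 7 * 23 == 161 == 4 * 40 + 1.
def _example_bezout():
    a, b = 7, 40
    x = bezout(a, b) % b
    assert (a * x) % b == 1
    return x   # -> 23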
def textToNum(textString):
    # Interpret the ASCII bytes of the string as a base-256 number
number = 0
for character in textString:
number = (number << 8) + ord(character)
return number
def numToText(number):
    # Decode a base-256 number back into its ASCII string
textString = ''
while number:
textString = chr(number % 256) + textString
number >>= 8
return textString
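# Illustrative sketch, not part of the original script: textToNum() and
# numToText() are inverses for strings without leading NUL bytes, e.g.
# 'Hi' -> (ord('H') << 8) + ord('i') == 18537 -> 'Hi'.
def _example_codec_roundtrip():
    assert textToNum('Hi') == 18537
    assert numToText(18537) == 'Hi'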
if __name__ == '__main__':
message = raw_input("Type a message to test:")
RSA(message)
raw_input("Press a key to exit")
# Sorry about my English
# ==== CuriosoInformatico/HoneyCheck :: dhcp_watchmen.py (mit) ====
import logging.config
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
from threading import Thread
from scapy.all import *
import time
logger = logging.getLogger(name='elchicodepython.honeycheck')
def exec_array(array, **kwargs):
for object, method in array:
method(object, **kwargs) # == object.method()
class DHCPServer:
def __init__(self, ip, hw):
self.ip = ip
self.hw = hw
def __repr__(self):
return '<DHCPServer Object (ip = %s, hw = %s)>' % (self.ip, self.hw)
def __str__(self):
return '<DHCPServer Object (ip = %s, hw = %s)>' % (self.ip, self.hw)
class Status:
OK = 1
ROGUE_DETECTED = 2
class DHCPWatchmen:
def __init__(self, iface, fail_test, pass_test, final_exec, whitelist):
'''
:param iface: interface to watch
:param fail_test: action to trigger if a rogue dhcp server is detected
:param pass_test: action to trigger if there are no rogue dhcp servers detected
:param final_exec: action to trigger always after fail_test or pass_test
:param whitelist: list of IPs of verified DHCP servers to ignore.
'''
self.iface = iface
self.hw = get_if_hwaddr(iface)
self.fail_test = fail_test
self.pass_test = pass_test
self.final_exec = final_exec
self.whitelist = whitelist
self.dhcp_servers = {}
self.last_status = Status.OK
def check_dhcp_servers(self, number_allowed):
'''
Check if the number of DHCP Servers detected is allowed
and trigger the corresponding action to each situation
:param number_allowed: number of dhcp_servers allowed
'''
if len(self.dhcp_servers) > number_allowed:
if self.last_status != Status.ROGUE_DETECTED:
logger.warning('MORE DHCP SERVERS THAN ALLOWED: ')
self.last_status = Status.ROGUE_DETECTED
exec_array(self.fail_test, watchmen = self)
self.dhcp_servers = {}
else:
if self.last_status != Status.OK:
logger.info('All seems right')
self.last_status = Status.OK
exec_array(self.pass_test, watchmen = self)
exec_array(self.final_exec, watchmen=self)
def check_packet(self, packet):
if packet.payload.op == 2:
if self.whitelist:
if (packet.payload.src not in self.whitelist):
self.dhcp_servers[packet.payload.src] = DHCPServer(packet.payload.src, packet.src)
else:
self.dhcp_servers[packet.payload.src] = DHCPServer(packet.payload.src, packet.src)
def send_dhcp_discovery(self):
dhcp_discover = Ether(dst="ff:ff:ff:ff:ff:ff") / IP(src="0.0.0.0", dst="255.255.255.255") / UDP(sport=68, dport=67) / BOOTP(chaddr=self.hw, flags = 0x8000) / DHCP(options=[("message-type", "discover"), "end"])
sendp(dhcp_discover, verbose = 0)
logger.debug('DHCP DISCOVER SEND')
def dhcp_discovery_daemon(self, timeout):
if self.whitelist:
            # Any DHCP server that is not in the whitelist is considered rogue
logger.info('Whitelist enabled for ' + self.iface)
max_servers_allowed = 0
else:
            # There is supposed to be at least one DHCP server in the network
logger.info('Executing HoneyCheck in %s without Whitelist' % self.iface)
max_servers_allowed = 1
while True:
self.send_dhcp_discovery()
time.sleep(timeout)
self.check_dhcp_servers(max_servers_allowed)
def sniff_dhcp(self):
sniff(iface = self.iface, filter='udp port 68', prn = self.check_packet)
def __repr__(self):
return '<DHCPSWatchmen Object (iface = %s)>' % (self.iface)
def __str__(self):
return '<DHCPSWatchmen Object (iface = %s)>' % (self.iface)
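# Illustrative usage sketch, not part of the original module: the watchmen is
# typically driven by two threads, one sniffing replies and one sending
# periodic DHCP discoveries. The interface name, whitelist and timeout are
# assumptions; the hook lists are left empty here.
def _example_watchmen():
    watchmen = DHCPWatchmen('eth0', fail_test=[], pass_test=[],
                            final_exec=[], whitelist=['192.168.1.1'])
    Thread(target=watchmen.sniff_dhcp).start()
    watchmen.dhcp_discovery_daemon(timeout=10)   # blocks, probing forever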
# ==== MotorolaMobilityLLC/external-chromium_org :: third_party/gtk+/gtk/compose-parse.py ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# compose-parse.py, version 1.3
#
# multifunction script that helps manage the compose sequence table in GTK+ (gtk/gtkimcontextsimple.c)
# the script produces statistics and information about the whole process, run with --help for more.
#
# You may need to switch your python installation to utf-8, if you get 'ascii' codec errors.
#
# Complain to Simos Xenitellis ([email protected], http://simos.info/blog) for this craft.
from re import findall, match, split, sub
from string import atoi
from unicodedata import normalize
from urllib import urlretrieve
from os.path import isfile, getsize
from copy import copy
import sys
import getopt
# We grab files off the web, left and right.
URL_COMPOSE = 'http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre'
URL_KEYSYMSTXT = "http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt"
URL_GDKKEYSYMSH = "http://git.gnome.org/browse/gtk%2B/plain/gdk/gdkkeysyms.h"
URL_UNICODEDATATXT = 'http://www.unicode.org/Public/5.2.0/ucd/UnicodeData.txt'
FILENAME_COMPOSE_SUPPLEMENTARY = 'gtk-compose-lookaside.txt'
# We currently support keysyms of size 2; once upstream xorg gets sorted,
# we might produce some tables with size 2 and some with size 4.
SIZEOFINT = 2
# Current max compose sequence length; in case it gets increased.
WIDTHOFCOMPOSETABLE = 5
keysymdatabase = {}
keysymunicodedatabase = {}
unicodedatabase = {}
headerfile_start = """/* GTK - The GIMP Tool Kit
* Copyright (C) 2007, 2008 GNOME Foundation
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/*
* File auto-generated from script found at http://bugzilla.gnome.org/show_bug.cgi?id=321896
* using the input files
* Input : http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre
* Input : http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt
* Input : http://www.unicode.org/Public/UNIDATA/UnicodeData.txt
*
* This table is optimised for space and requires special handling to access the content.
* This table is used solely by http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimple.c
*
* The resulting file is placed at http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimpleseqs.h
* This file is described in bug report http://bugzilla.gnome.org/show_bug.cgi?id=321896
*/
/*
* Modified by the GTK+ Team and others 2007, 2008. See the AUTHORS
* file for a list of people on the GTK+ Team. See the ChangeLog
* files for a list of changes. These files are distributed with
* GTK+ at ftp://ftp.gtk.org/pub/gtk/.
*/
#ifndef __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
#define __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
/* === These are the original comments of the file; we keep for historical purposes ===
*
* The following table was generated from the X compose tables include with
* XFree86 4.0 using a set of Perl scripts. Contact Owen Taylor <[email protected]>
* to obtain the relevant perl scripts.
*
* The following compose letter letter sequences confliced
* Dstroke/dstroke and ETH/eth; resolved to Dstroke (Croation, Vietnamese, Lappish), over
* ETH (Icelandic, Faroese, old English, IPA) [ D- -D d- -d ]
* Amacron/amacron and ordfeminine; resolved to ordfeminine [ _A A_ a_ _a ]
* Amacron/amacron and Atilde/atilde; resolved to atilde [ -A A- a- -a ]
* Omacron/Omacron and masculine; resolved to masculine [ _O O_ o_ _o ]
* Omacron/omacron and Otilde/atilde; resolved to otilde [ -O O- o- -o ]
*
* [ Amacron and Omacron are in Latin-4 (Baltic). ordfeminine and masculine are used for
* spanish. atilde and otilde are used at least for Portuguese ]
*
* at and Aring; resolved to Aring [ AA ]
* guillemotleft and caron; resolved to guillemotleft [ << ]
* ogonek and cedilla; resolved to cedilla [ ,, ]
*
* This probably should be resolved by first checking an additional set of compose tables
* that depend on the locale or selected input method.
*/
static const guint16 gtk_compose_seqs_compact[] = {"""
headerfile_end = """};
#endif /* __GTK_IM_CONTEXT_SIMPLE_SEQS_H__ */
"""
def stringtohex(str): return atoi(str, 16)
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def uniq(*args) :
""" Performs a uniq operation on a list or lists """
theInputList = []
for theList in args:
theInputList += theList
theFinalList = []
for elem in theInputList:
if elem not in theFinalList:
theFinalList.append(elem)
return theFinalList
def all_permutations(seq):
""" Borrowed from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178 """
""" Produces all permutations of the items of a list """
if len(seq) <=1:
yield seq
else:
for perm in all_permutations(seq[1:]):
for i in range(len(perm)+1):
#nb str[0:1] works in both string and list contexts
yield perm[:i] + seq[0:1] + perm[i:]
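# Illustrative sketch, not part of the original script: all_permutations() is
# a generator, so it can be consumed lazily; a two-element list yields its
# 2! = 2 orderings.
def _example_all_permutations():
    perms = list(all_permutations(['a', 'b']))
    assert perms == [['a', 'b'], ['b', 'a']]
    return perms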
def usage():
print """compose-parse available parameters:
-h, --help this craft
-s, --statistics show overall statistics (both algorithmic, non-algorithmic)
-a, --algorithmic show sequences saved with algorithmic optimisation
-g, --gtk show entries that go to GTK+
-u, --unicodedatatxt show compose sequences derived from UnicodeData.txt (from unicode.org)
-v, --verbose show verbose output
-p, --plane1 show plane1 compose sequences
-n, --numeric when used with --gtk, create file with numeric values only
-e, --gtk-expanded when used with --gtk, create file that repeats first column; not usable in GTK+
--all-sequences when used with --gtk, create file with entries rejected by default
Default is to show statistics.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "pvgashune", ["help", "algorithmic", "statistics", "unicodedatatxt",
"stats", "gtk", "verbose", "plane1", "numeric", "gtk-expanded", "all-sequences"])
except:
usage()
sys.exit(2)
opt_statistics = False
opt_algorithmic = False
opt_gtk = False
opt_unicodedatatxt = False
opt_verbose = False
opt_plane1 = False
opt_numeric = False
opt_gtkexpanded = False
opt_allsequences = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-s", "--statistics"):
opt_statistics = True
if o in ("-a", "--algorithmic"):
opt_algorithmic = True
if o in ("-g", "--gtk"):
opt_gtk = True
if o in ("-u", "--unicodedatatxt"):
opt_unicodedatatxt = True
if o in ("-v", "--verbose"):
opt_verbose = True
if o in ("-p", "--plane1"):
opt_plane1 = True
if o in ("-n", "--numeric"):
opt_numeric = True
if o in ("-e", "--gtk-expanded"):
opt_gtkexpanded = True
if o == "--all-sequences":
opt_allsequences = True
if not opt_algorithmic and not opt_gtk and not opt_unicodedatatxt:
opt_statistics = True
def download_hook(blocks_transferred, block_size, file_size):
""" A download hook to provide some feedback when downloading """
if blocks_transferred == 0:
if file_size > 0:
if opt_verbose:
print "Downloading", file_size, "bytes: ",
else:
if opt_verbose:
print "Downloading: ",
sys.stdout.write('#')
sys.stdout.flush()
def download_file(url):
""" Downloads a file provided a URL. Returns the filename. """
""" Borks on failure """
localfilename = url.split('/')[-1]
if not isfile(localfilename) or getsize(localfilename) <= 0:
if opt_verbose:
print "Downloading ", url, "..."
try:
urlretrieve(url, localfilename, download_hook)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
print " done."
else:
if opt_verbose:
print "Using cached file for ", url
return localfilename
def process_gdkkeysymsh():
""" Opens the gdkkeysyms.h file from GTK+/gdk/gdkkeysyms.h """
""" Fills up keysymdb with contents """
filename_gdkkeysymsh = download_file(URL_GDKKEYSYMSH)
try:
gdkkeysymsh = open(filename_gdkkeysymsh, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the gdkkeysyms.h file and place contents in keysymdb """
linenum_gdkkeysymsh = 0
keysymdb = {}
for line in gdkkeysymsh.readlines():
linenum_gdkkeysymsh += 1
line = line.strip()
if line == "" or not match('^#define GDK_KEY_', line):
continue
components = split('\s+', line)
if len(components) < 3:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting 3 items in the line"
sys.exit(-1)
if not match('^GDK_KEY_', components[1]):
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a keysym starting with GDK_KEY_"
sys.exit(-1)
if match('^0x[0-9a-fA-F]+$', components[2]):
unival = long(components[2][2:], 16)
if unival == 0:
continue
keysymdb[components[1][8:]] = unival
else:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a hexadecimal number at the end of the line"
sys.exit(-1)
gdkkeysymsh.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
#keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is^Wwas preferential treatment for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is^was preferential treatment for Greek """
#keysymdb['combining_tilde'] = 0x342
""" Fixing VoidSymbol """
keysymdb['VoidSymbol'] = 0xFFFF
return keysymdb
def process_keysymstxt():
""" Grabs and opens the keysyms.txt file that Markus Kuhn maintains """
""" This file keeps a record between keysyms <-> unicode chars """
filename_keysymstxt = download_file(URL_KEYSYMSTXT)
try:
keysymstxt = open(filename_keysymstxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the keysyms.txt file and place content in keysymdb """
linenum_keysymstxt = 0
keysymdb = {}
for line in keysymstxt.readlines():
linenum_keysymstxt += 1
line = line.strip()
if line == "" or match('^#', line):
continue
components = split('\s+', line)
if len(components) < 5:
print "Invalid line %(linenum)d in %(filename)s: %(line)s'"\
% {'linenum': linenum_keysymstxt, 'filename': filename_keysymstxt, 'line': line}
print "Was expecting 5 items in the line"
sys.exit(-1)
if match('^U[0-9a-fA-F]+$', components[1]):
unival = long(components[1][1:], 16)
if unival == 0:
continue
keysymdb[components[4]] = unival
keysymstxt.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is preferential treatment for Greek """
""" => we get more savings if used for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is preferential treatment for Greek """
# keysymdb['combining_tilde'] = 0x342
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['Oslash'] = 0x0d8
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['Ssharp'] = 0x1e9e
""" This is for a missing (recently added) keysym """
keysymdb['dead_psili'] = 0x313
""" This is for a missing (recently added) keysym """
keysymdb['dead_dasia'] = 0x314
""" Allows to import Multi_key sequences """
keysymdb['Multi_key'] = 0xff20
keysymdb['zerosubscript'] = 0x2080
keysymdb['onesubscript'] = 0x2081
keysymdb['twosubscript'] = 0x2082
keysymdb['threesubscript'] = 0x2083
keysymdb['foursubscript'] = 0x2084
keysymdb['fivesubscript'] = 0x2085
keysymdb['sixsubscript'] = 0x2086
keysymdb['sevensubscript'] = 0x2087
keysymdb['eightsubscript'] = 0x2088
keysymdb['ninesubscript'] = 0x2089
keysymdb['dead_doublegrave'] = 0x030F
keysymdb['dead_invertedbreve'] = 0x0311
return keysymdb
def keysymvalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymdatabase.has_key(keysym):
return keysymdatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymvalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
#return -1
sys.exit(-1)
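# Illustrative sketch, not part of the original script: keysymvalue() accepts
# a named keysym from the downloaded tables, a UXXXX form, or a raw hex
# literal. The two literal forms below do not depend on the tables.
def _example_keysymvalue():
    assert keysymvalue('U0041') == 0x41      # 'A' via the UXXXX form
    assert keysymvalue('0x20ac') == 0x20ac   # euro sign via a hex literal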
def keysymunicodevalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymunicodedatabase.has_key(keysym):
return keysymunicodedatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymunicodevalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
sys.exit(-1)
def rename_combining(seq):
filtered_sequence = []
for ks in seq:
if findall('^combining_', ks):
ks = sub('^combining_', 'dead_', ks)
if ks == 'dead_double_grave':
ks = 'dead_doublegrave'
if ks == 'dead_inverted_breve':
ks = 'dead_invertedbreve'
filtered_sequence.append(ks)
return filtered_sequence
keysymunicodedatabase = process_keysymstxt()
keysymdatabase = process_gdkkeysymsh()
""" Grab and open the compose file from upstream """
filename_compose = download_file(URL_COMPOSE)
try:
composefile = open(filename_compose, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Look if there is a lookaside (supplementary) compose file in the current
directory, and if so, open, then merge with upstream Compose file.
"""
xorg_compose_sequences_raw = []
for seq in composefile.readlines():
xorg_compose_sequences_raw.append(seq)
try:
composefile_lookaside = open(FILENAME_COMPOSE_SUPPLEMENTARY, 'r')
for seq in composefile_lookaside.readlines():
xorg_compose_sequences_raw.append(seq)
except IOError, (errno, strerror):
if opt_verbose:
print "I/O error(%s): %s" % (errno, strerror)
print "Did not find lookaside compose file. Continuing..."
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the compose file in xorg_compose_sequences"""
xorg_compose_sequences = []
xorg_compose_sequences_algorithmic = []
linenum_compose = 0
comment_nest_depth = 0
for line in xorg_compose_sequences_raw:
linenum_compose += 1
line = line.strip()
if match("^XCOMM", line) or match("^#", line):
continue
line = sub(r"\/\*([^\*]*|[\*][^/])\*\/", "", line)
comment_start = line.find("/*")
if comment_start >= 0:
if comment_nest_depth == 0:
line = line[:comment_start]
else:
line = ""
comment_nest_depth += 1
else:
comment_end = line.find("*/")
if comment_end >= 0:
comment_nest_depth -= 1
if comment_nest_depth < 0:
print "Invalid comment %(linenum_compose)d in %(filename)s: \
Closing '*/' without opening '/*'" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
if comment_nest_depth > 0:
line = ""
else:
line = line[comment_end + 2:]
if line is "":
continue
#line = line[:-1]
components = split(':', line)
if len(components) != 2:
print "Invalid line %(linenum_compose)d in %(filename)s: No sequence\
/value pair found" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
(seq, val ) = split(':', line)
seq = seq.strip()
val = val.strip()
raw_sequence = findall('\w+', seq)
values = split('\s+', val)
unichar_temp = split('"', values[0])
unichar = unichar_temp[1]
if len(values) == 1:
continue
codepointstr = values[1]
if values[1] == '#':
# No codepoints that are >1 characters yet.
continue
if raw_sequence[0][0] == 'U' and match('[0-9a-fA-F]+$', raw_sequence[0][1:]):
raw_sequence[0] = '0x' + raw_sequence[0][1:]
if match('^U[0-9a-fA-F]+$', codepointstr):
codepoint = long(codepointstr[1:], 16)
elif keysymunicodedatabase.has_key(codepointstr):
#if keysymdatabase[codepointstr] != keysymunicodedatabase[codepointstr]:
#print "DIFFERENCE: 0x%(a)X 0x%(b)X" % { "a": keysymdatabase[codepointstr], "b": keysymunicodedatabase[codepointstr]},
#print raw_sequence, codepointstr
codepoint = keysymunicodedatabase[codepointstr]
else:
print
print "Invalid codepoint at line %(linenum_compose)d in %(filename)s:\
%(line)s" % { "linenum_compose": linenum_compose, "filename": filename_compose, "line": line }
exit(-1)
sequence = rename_combining(raw_sequence)
reject_this = False
for i in sequence:
if keysymvalue(i) > 0xFFFF:
reject_this = True
if opt_plane1:
print sequence
break
if keysymvalue(i) < 0:
reject_this = True
break
if reject_this:
continue
if "U0342" in sequence or \
"U0313" in sequence or \
"U0314" in sequence or \
"0x0313" in sequence or \
"0x0342" in sequence or \
"0x0314" in sequence:
continue
if "dead_belowring" in sequence or\
"dead_currency" in sequence or\
"dead_belowcomma" in sequence or\
"dead_belowmacron" in sequence or\
"dead_belowtilde" in sequence or\
"dead_belowbreve" in sequence or\
"dead_belowdiaeresis" in sequence or\
"dead_belowcircumflex" in sequence:
continue
#for i in range(len(sequence)):
# if sequence[i] == "0x0342":
# sequence[i] = "dead_tilde"
if "Multi_key" not in sequence:
""" Ignore for now >0xFFFF keysyms """
if codepoint < 0xFFFF:
original_sequence = copy(sequence)
stats_sequence = copy(sequence)
base = sequence.pop()
basechar = keysymvalue(base, filename_compose, linenum_compose)
if basechar < 0xFFFF:
counter = 1
unisequence = []
not_normalised = True
skipping_this = False
for i in range(0, len(sequence)):
""" If the sequence has dead_tilde and is for Greek, we don't do algorithmically
because of lack of dead_perispomeni (i.e. conflict)
"""
bc = basechar
"""if sequence[-1] == "dead_tilde" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_horn" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_ogonek" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_psili":
sequence[i] = "dead_horn"
if sequence[-1] == "dead_dasia":
sequence[-1] = "dead_ogonek"
"""
unisequence.append(unichr(keysymunicodevalue(sequence.pop(), filename_compose, linenum_compose)))
if skipping_this:
unisequence = []
for perm in all_permutations(unisequence):
# print counter, original_sequence, unichr(basechar) + "".join(perm)
# print counter, map(unichr, perm)
normalized = normalize('NFC', unichr(basechar) + "".join(perm))
if len(normalized) == 1:
# print 'Base: %(base)s [%(basechar)s], produces [%(unichar)s] (0x%(codepoint)04X)' \
# % { "base": base, "basechar": unichr(basechar), "unichar": unichar, "codepoint": codepoint },
# print "Normalized: [%(normalized)s] SUCCESS %(c)d" % { "normalized": normalized, "c": counter }
stats_sequence_data = map(keysymunicodevalue, stats_sequence)
stats_sequence_data.append(normalized)
xorg_compose_sequences_algorithmic.append(stats_sequence_data)
not_normalised = False
break
counter += 1
if not_normalised or opt_allsequences:
original_sequence.append(codepoint)
xorg_compose_sequences.append(original_sequence)
""" print xorg_compose_sequences[-1] """
else:
print "Error in base char !?!"
exit(-2)
else:
print "OVER", sequence
exit(-1)
else:
sequence.append(codepoint)
xorg_compose_sequences.append(sequence)
""" print xorg_compose_sequences[-1] """
def sequence_cmp(x, y):
if keysymvalue(x[0]) > keysymvalue(y[0]):
return 1
elif keysymvalue(x[0]) < keysymvalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymvalue(x[1]) > keysymvalue(y[1]):
return 1
elif keysymvalue(x[1]) < keysymvalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymvalue(x[2]) > keysymvalue(y[2]):
return 1
elif keysymvalue(x[2]) < keysymvalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymvalue(x[3]) > keysymvalue(y[3]):
return 1
elif keysymvalue(x[3]) < keysymvalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymvalue(x[4]) > keysymvalue(y[4]):
return 1
elif keysymvalue(x[4]) < keysymvalue(y[4]):
return -1
else:
return 0
def sequence_unicode_cmp(x, y):
if keysymunicodevalue(x[0]) > keysymunicodevalue(y[0]):
return 1
elif keysymunicodevalue(x[0]) < keysymunicodevalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymunicodevalue(x[1]) > keysymunicodevalue(y[1]):
return 1
elif keysymunicodevalue(x[1]) < keysymunicodevalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymunicodevalue(x[2]) > keysymunicodevalue(y[2]):
return 1
elif keysymunicodevalue(x[2]) < keysymunicodevalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymunicodevalue(x[3]) > keysymunicodevalue(y[3]):
return 1
elif keysymunicodevalue(x[3]) < keysymunicodevalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymunicodevalue(x[4]) > keysymunicodevalue(y[4]):
return 1
elif keysymunicodevalue(x[4]) < keysymunicodevalue(y[4]):
return -1
else:
return 0
def sequence_algorithmic_cmp(x, y):
if len(x) < len(y):
return -1
elif len(x) > len(y):
return 1
else:
for i in range(len(x)):
if x[i] < y[i]:
return -1
elif x[i] > y[i]:
return 1
return 0
xorg_compose_sequences.sort(sequence_cmp)
xorg_compose_sequences_uniqued = []
first_time = True
item = None
for next_item in xorg_compose_sequences:
if first_time:
first_time = False
item = next_item
if sequence_unicode_cmp(item, next_item) != 0:
xorg_compose_sequences_uniqued.append(item)
item = next_item
xorg_compose_sequences = copy(xorg_compose_sequences_uniqued)
counter_multikey = 0
for item in xorg_compose_sequences:
if findall('Multi_key', "".join(item[:-1])) != []:
counter_multikey += 1
xorg_compose_sequences_algorithmic.sort(sequence_algorithmic_cmp)
xorg_compose_sequences_algorithmic_uniqued = uniq(xorg_compose_sequences_algorithmic)
firstitem = ""
num_first_keysyms = 0
zeroes = 0
num_entries = 0
num_algorithmic_greek = 0
for sequence in xorg_compose_sequences:
if keysymvalue(firstitem) != keysymvalue(sequence[0]):
firstitem = sequence[0]
num_first_keysyms += 1
zeroes += 6 - len(sequence) + 1
num_entries += 1
for sequence in xorg_compose_sequences_algorithmic_uniqued:
ch = ord(sequence[-1:][0])
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
num_algorithmic_greek += 1
if opt_algorithmic:
for sequence in xorg_compose_sequences_algorithmic_uniqued:
letter = "".join(sequence[-1:])
print '0x%(cp)04X, %(uni)s, seq: [ <0x%(base)04X>,' % { 'cp': ord(unicode(letter)), 'uni': letter.encode('utf-8'), 'base': sequence[-2] },
for elem in sequence[:-2]:
print "<0x%(keysym)04X>," % { 'keysym': elem },
""" Yeah, verified... We just want to keep the output similar to -u, so we can compare/sort easily """
print "], recomposed as", letter.encode('utf-8'), "verified"
def num_of_keysyms(seq):
return len(seq) - 1
def convert_UnotationToHex(arg):
if isinstance(arg, str):
if match('^U[0-9A-F][0-9A-F][0-9A-F][0-9A-F]$', arg):
return sub('^U', '0x', arg)
return arg
def addprefix_GDK(arg):
if match('^0x', arg):
return '%(arg)s, ' % { 'arg': arg }
else:
return 'GDK_KEY_%(arg)s, ' % { 'arg': arg }
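# The emitted GTK table has two parts: an index row per first keysym whose
# per-length occurrence counts are converted into offsets, and a second,
# flattened part holding the sequences and their result codepoints.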
if opt_gtk:
first_keysym = ""
sequence = []
compose_table = []
ct_second_part = []
ct_sequence_width = 2
start_offset = num_first_keysyms * (WIDTHOFCOMPOSETABLE+1)
we_finished = False
counter = 0
sequence_iterator = iter(xorg_compose_sequences)
sequence = sequence_iterator.next()
while True:
first_keysym = sequence[0] # Set the first keysym
compose_table.append([first_keysym, 0, 0, 0, 0, 0])
while sequence[0] == first_keysym:
compose_table[counter][num_of_keysyms(sequence)-1] += 1
try:
sequence = sequence_iterator.next()
except StopIteration:
we_finished = True
break
if we_finished:
break
counter += 1
ct_index = start_offset
for line_num in range(len(compose_table)):
for i in range(WIDTHOFCOMPOSETABLE):
occurrences = compose_table[line_num][i+1]
compose_table[line_num][i+1] = ct_index
ct_index += occurrences * (i+2)
for sequence in xorg_compose_sequences:
ct_second_part.append(map(convert_UnotationToHex, sequence))
print headerfile_start
for i in compose_table:
if opt_gtkexpanded:
print "0x%(ks)04X," % { "ks": keysymvalue(i[0]) },
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i[1:])) }
elif not match('^0x', i[0]):
print 'GDK_KEY_%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
else:
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
for i in ct_second_part:
if opt_numeric:
for ks in i[1:][:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
for ks in i[:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
elif opt_gtkexpanded:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1])), 'cp':i[-1] }
else:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1][1:])), 'cp':i[-1] }
print headerfile_end
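# redecompose() recursively expands a codepoint through its canonical
# decomposition (for tagged compatibility decompositions like <compat>, only
# the tail is kept) until no further decomposition is possible.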
def redecompose(codepoint):
(name, decomposition, combiningclass) = unicodedatabase[codepoint]
if decomposition[0] == '' or decomposition[0] == '0':
return [codepoint]
if match('<\w+>', decomposition[0]):
numdecomposition = map(stringtohex, decomposition[1:])
return map(redecompose, numdecomposition)
numdecomposition = map(stringtohex, decomposition)
return map(redecompose, numdecomposition)
def process_unicodedata_file(verbose = False):
""" Grab from wget http://www.unicode.org/Public/UNIDATA/UnicodeData.txt """
filename_unicodedatatxt = download_file(URL_UNICODEDATATXT)
try:
unicodedatatxt = open(filename_unicodedatatxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
for line in unicodedatatxt.readlines():
if line[0] == "" or line[0] == '#':
continue
line = line[:-1]
uniproperties = split(';', line)
codepoint = stringtohex(uniproperties[0])
""" We don't do Plane 1 or CJK blocks. The latter require reading additional files. """
if codepoint > 0xFFFF or (codepoint >= 0x4E00 and codepoint <= 0x9FFF) or (codepoint >= 0xF900 and codepoint <= 0xFAFF):
continue
name = uniproperties[1]
category = uniproperties[2]
combiningclass = uniproperties[3]
decomposition = uniproperties[5]
unicodedatabase[codepoint] = [name, split('\s+', decomposition), combiningclass]
counter_combinations = 0
counter_combinations_greek = 0
counter_entries = 0
counter_entries_greek = 0
for item in unicodedatabase.keys():
(name, decomposition, combiningclass) = unicodedatabase[item]
if decomposition[0] == '':
continue
# unreachable debug output: print name, "is empty"
elif match('<\w+>', decomposition[0]):
continue
# unreachable debug output: print name, "has weird", decomposition[0]
else:
sequence = map(stringtohex, decomposition)
chrsequence = map(unichr, sequence)
normalized = normalize('NFC', "".join(chrsequence))
""" print name, sequence, "Combining: ", "".join(chrsequence), normalized, len(normalized), """
decomposedsequence = []
for subseq in map(redecompose, sequence):
for seqitem in subseq:
if isinstance(seqitem, list):
for i in seqitem:
if isinstance(i, list):
for j in i:
decomposedsequence.append(j)
else:
decomposedsequence.append(i)
else:
decomposedsequence.append(seqitem)
recomposedchar = normalize('NFC', "".join(map(unichr, decomposedsequence)))
if len(recomposedchar) == 1 and len(decomposedsequence) > 1:
counter_entries += 1
counter_combinations += factorial(len(decomposedsequence)-1)
ch = item
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
counter_entries_greek += 1
counter_combinations_greek += factorial(len(decomposedsequence)-1)
if verbose:
print "0x%(cp)04X, %(uni)c, seq:" % { 'cp':item, 'uni':unichr(item) },
print "[",
for elem in decomposedsequence:
print '<0x%(hex)04X>,' % { 'hex': elem },
print "], recomposed as", recomposedchar,
if unichr(item) == recomposedchar:
print "verified"
if verbose == False:
print "Unicode statistics from UnicodeData.txt"
print "Number of entries that can be algorithmically produced :", counter_entries
print " of which are for Greek :", counter_entries_greek
print "Number of compose sequence combinations requiring :", counter_combinations
print " of which are for Greek :", counter_combinations_greek
print "Note: We do not include partial compositions, "
print "thus the slight discrepancy in the figures"
print
if opt_unicodedatatxt:
process_unicodedata_file(True)
if opt_statistics:
print
print "Total number of compose sequences (from file) :", len(xorg_compose_sequences) + len(xorg_compose_sequences_algorithmic)
print " of which can be expressed algorithmically :", len(xorg_compose_sequences_algorithmic)
print " of which cannot be expressed algorithmically :", len(xorg_compose_sequences)
print " of which have Multi_key :", counter_multikey
print
print "Algorithmic (stats for Xorg Compose file)"
print "Number of sequences off due to algo from file (len(array)) :", len(xorg_compose_sequences_algorithmic)
print "Number of sequences off due to algo (uniq(sort(array))) :", len(xorg_compose_sequences_algorithmic_uniqued)
print " of which are for Greek :", num_algorithmic_greek
print
process_unicodedata_file()
print "Not algorithmic (stats from Xorg Compose file)"
print "Number of sequences :", len(xorg_compose_sequences)
print "Flat array looks like :", len(xorg_compose_sequences), "rows of 6 integers (2 bytes per int, or 12 bytes per row)"
print "Flat array would have taken up (in bytes) :", num_entries * 2 * 6, "bytes from the GTK+ library"
print "Number of items in flat array :", len(xorg_compose_sequences) * 6
print " of which are zeroes :", zeroes, "or ", (100 * zeroes) / (len(xorg_compose_sequences) * 6), " per cent"
print "Number of different first items :", num_first_keysyms
print "Number of max bytes (if using flat array) :", num_entries * 2 * 6
print "Number of savings :", zeroes * 2 - num_first_keysyms * 2 * 5
print
print "Memory needs if both algorithmic+optimised table in latest Xorg compose file"
print " :", num_entries * 2 * 6 - zeroes * 2 + num_first_keysyms * 2 * 5
print
print "Existing (old) implementation in GTK+"
print "Number of sequences in old gtkimcontextsimple.c :", 691
print "The existing (old) implementation in GTK+ takes up :", 691 * 2 * 12, "bytes"
| bsd-3-clause | 6,037,398,277,350,444,000 | 34.408247 | 159 | 0.650236 | false |
byndcivilization/toy-infrastructure | flask-app/venv/lib/python3.6/site-packages/pip/utils/ui.py | 490 | 11597 | from __future__ import absolute_import
from __future__ import division
import itertools
import sys
from signal import signal, SIGINT, default_int_handler
import time
import contextlib
import logging
from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor import six
from pip._vendor.progress.bar import Bar, IncrementalBar
from pip._vendor.progress.helpers import (WritelnMixin,
HIDE_CURSOR, SHOW_CURSOR)
from pip._vendor.progress.spinner import Spinner
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
logger = logging.getLogger(__name__)
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar)
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
"""
Save the original SIGINT handler for later.
"""
super(InterruptibleMixin, self).__init__(*args, **kwargs)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame):
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
super(DownloadProgressMixin, self).__init__(*args, **kwargs)
self.message = (" " * (get_indentation() + 2)) + self.message
@property
def downloaded(self):
return format_size(self.index)
@property
def download_speed(self):
# Avoid zero division errors...
if self.avg == 0.0:
return "..."
return format_size(1 / self.avg) + "/s"
@property
def pretty_eta(self):
if self.eta:
return "eta %s" % self.eta_td
return ""
def iter(self, it, n=1):
for x in it:
yield x
self.next(n)
self.finish()
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor:
self.hide_cursor = False
super(WindowsMixin, self).__init__(*args, **kwargs)
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file)
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class DownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, _BaseBar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
################################################################
# Generic "something is happening" spinners
#
# We don't even try using progress.spinner.Spinner here because it's actually
# simpler to reimplement from scratch than to coerce their code into doing
# what we need.
################################################################
@contextlib.contextmanager
def hidden_cursor(file):
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
# We don't want to clutter the output with control characters if we're
# writing to a file, or if the user is running with --quiet.
# See https://github.com/pypa/pip/issues/3418
elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0
def ready(self):
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
self._last_update = time.time()
class InteractiveSpinner(object):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(object):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '%s'" % (final_status,))
self._finished = True
@contextlib.contextmanager
def open_spinner(message):
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
| gpl-3.0 | 891,597,668,929,074,700 | 32.712209 | 79 | 0.626972 | false |
DavidNorman/tensorflow | tensorflow/python/kernel_tests/proto/proto_op_test_base.py | 22 | 16705 | # =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test case base for testing proto operations."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import os
from tensorflow.core.framework import types_pb2
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.platform import test
class ProtoOpTestBase(test.TestCase):
"""Base class for testing proto decoding and encoding ops."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(ProtoOpTestBase, self).__init__(methodName)
lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
if os.path.isfile(lib):
ct.cdll.LoadLibrary(lib)
@staticmethod
def named_parameters(extension=True):
parameters = [("defaults", ProtoOpTestBase.defaults_test_case()),
("minmax", ProtoOpTestBase.minmax_test_case()),
("nested", ProtoOpTestBase.nested_test_case()),
("optional", ProtoOpTestBase.optional_test_case()),
("promote", ProtoOpTestBase.promote_test_case()),
("ragged", ProtoOpTestBase.ragged_test_case()),
("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
("simple", ProtoOpTestBase.simple_test_case())]
if extension:
parameters.append(("extension", ProtoOpTestBase.extension_test_case()))
return parameters
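# Each static factory below builds a TestCase proto that pairs input values
# (with their shapes and per-field sizes) against the field descriptors and
# values expected back from the proto decoding/encoding ops under test.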
@staticmethod
def defaults_test_case():
test_case = test_example_pb2.TestCase()
test_case.values.add() # No fields specified, so we get all defaults.
test_case.shapes.append(1)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "double_value_with_default"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(1.0)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "float_value_with_default"
field.dtype = types_pb2.DT_FLOAT
field.value.float_value.append(2.0)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "int64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(3)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sfixed64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(11)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sint64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(13)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "uint64_value_with_default"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "fixed64_value_with_default"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(6)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "int32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(5)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sfixed32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(10)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sint32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(12)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "uint32_value_with_default"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(9)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "fixed32_value_with_default"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(7)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "bool_value_with_default"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "string_value_with_default"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("a")
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "bytes_value_with_default"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("a longer default string")
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "enum_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.enum_value.append(test_example_pb2.Color.GREEN)
return test_case
@staticmethod
def minmax_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(-1.7976931348623158e+308)
value.double_value.append(2.2250738585072014e-308)
value.double_value.append(1.7976931348623158e+308)
value.float_value.append(-3.402823466e+38)
value.float_value.append(1.175494351e-38)
value.float_value.append(3.402823466e+38)
value.int64_value.append(-9223372036854775808)
value.int64_value.append(9223372036854775807)
value.sfixed64_value.append(-9223372036854775808)
value.sfixed64_value.append(9223372036854775807)
value.sint64_value.append(-9223372036854775808)
value.sint64_value.append(9223372036854775807)
value.uint64_value.append(0)
value.uint64_value.append(18446744073709551615)
value.fixed64_value.append(0)
value.fixed64_value.append(18446744073709551615)
value.int32_value.append(-2147483648)
value.int32_value.append(2147483647)
value.sfixed32_value.append(-2147483648)
value.sfixed32_value.append(2147483647)
value.sint32_value.append(-2147483648)
value.sint32_value.append(2147483647)
value.uint32_value.append(0)
value.uint32_value.append(4294967295)
value.fixed32_value.append(0)
value.fixed32_value.append(4294967295)
value.bool_value.append(False)
value.bool_value.append(True)
value.string_value.append("")
value.string_value.append("I refer to the infinite.")
test_case.shapes.append(1)
test_case.sizes.append(3)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(-1.7976931348623158e+308)
field.value.double_value.append(2.2250738585072014e-308)
field.value.double_value.append(1.7976931348623158e+308)
test_case.sizes.append(3)
field = test_case.fields.add()
field.name = "float_value"
field.dtype = types_pb2.DT_FLOAT
field.value.float_value.append(-3.402823466e+38)
field.value.float_value.append(1.175494351e-38)
field.value.float_value.append(3.402823466e+38)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "int64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sfixed64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sint64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "uint64_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(0)
field.value.uint64_value.append(18446744073709551615)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "fixed64_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(0)
field.value.uint64_value.append(18446744073709551615)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "int32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sfixed32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sint32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "uint32_value"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(0)
field.value.uint32_value.append(4294967295)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "fixed32_value"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(0)
field.value.uint32_value.append(4294967295)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(False)
field.value.bool_value.append(True)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "string_value"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("")
field.value.string_value.append("I refer to the infinite.")
return test_case
@staticmethod
def nested_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
message_value = value.message_value.add()
message_value.double_value = 23.5
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "message_value"
field.dtype = types_pb2.DT_STRING
message_value = field.value.message_value.add()
message_value.double_value = 23.5
return test_case
@staticmethod
def optional_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.bool_value.append(True)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(0.0)
return test_case
@staticmethod
def promote_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.sint32_value.append(2147483647)
value.sfixed32_value.append(2147483647)
value.int32_value.append(2147483647)
value.fixed32_value.append(4294967295)
value.uint32_value.append(4294967295)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "sint32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "sfixed32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "int32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "fixed32_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4294967295)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "uint32_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4294967295)
return test_case
@staticmethod
def ragged_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.double_value.append(123.0)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(3.1)
value.bool_value.append(False)
test_case.shapes.append(2)
test_case.sizes.append(2)
test_case.sizes.append(1)
test_case.sizes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
field.value.double_value.append(123.0)
field.value.double_value.append(3.1)
field.value.double_value.append(0.0)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
field.value.bool_value.append(False)
return test_case
@staticmethod
def shaped_batch_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(44.0)
value.bool_value.append(False)
value = test_case.values.add()
value.double_value.append(3.14159)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(1.414)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(-32.2)
value.bool_value.append(False)
value = test_case.values.add()
value.double_value.append(0.0001)
value.bool_value.append(True)
test_case.shapes.append(3)
test_case.shapes.append(2)
for _ in range(12):
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
field.value.double_value.append(44.0)
field.value.double_value.append(3.14159)
field.value.double_value.append(1.414)
field.value.double_value.append(-32.2)
field.value.double_value.append(0.0001)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
field.value.bool_value.append(False)
field.value.bool_value.append(True)
field.value.bool_value.append(True)
field.value.bool_value.append(False)
field.value.bool_value.append(True)
return test_case
@staticmethod
def extension_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
message_value = value.Extensions[test_example_pb2.ext_value].add()
message_value.double_value = 23.5
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = test_example_pb2.ext_value.full_name
field.dtype = types_pb2.DT_STRING
message_value = field.value.Extensions[test_example_pb2.ext_value].add()
message_value.double_value = 23.5
return test_case
@staticmethod
def simple_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.bool_value.append(True)
value.enum_value.append(test_example_pb2.Color.INDIGO)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "enum_value"
field.dtype = types_pb2.DT_INT32
field.value.enum_value.append(test_example_pb2.Color.INDIGO)
return test_case
| apache-2.0 | 1,390,804,497,032,073,200 | 36.455157 | 79 | 0.687698 | false |
WPMedia/dd-agent | utils/proxy.py | 8 | 2497 | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
import os
from urllib import getproxies
from urlparse import urlparse
log = logging.getLogger(__name__)
def set_no_proxy_settings():
"""
Starting with Agent 5.0.0, there should always be a local forwarder
running and all payloads should go through it. So we should make sure
that we pass the no_proxy environment variable that will be used by requests
See: https://github.com/kennethreitz/requests/pull/945
"""
to_add = ["127.0.0.1", "localhost", "169.254.169.254"]
no_proxy = os.environ.get("no_proxy", "")
if not no_proxy.strip():
no_proxy = []
else:
no_proxy = no_proxy.split(',')
for host in to_add:
if host not in no_proxy:
no_proxy.append(host)
os.environ['no_proxy'] = ','.join(no_proxy)
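# get_proxy() below prefers the datadog.conf settings and falls back to the
# system https proxy. Illustrative result for a hypothetical config:
#   get_proxy({'proxy_host': 'proxy.example.com'}) ->
#   {'host': 'proxy.example.com', 'port': 3128, 'user': None, 'password': None}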
def get_proxy(agentConfig):
proxy_settings = {}
# First we read the proxy configuration from datadog.conf
proxy_host = agentConfig.get('proxy_host')
if proxy_host is not None:
proxy_settings['host'] = proxy_host
try:
proxy_settings['port'] = int(agentConfig.get('proxy_port', 3128))
except ValueError:
log.error('Proxy port must be an Integer. Defaulting it to 3128')
proxy_settings['port'] = 3128
proxy_settings['user'] = agentConfig.get('proxy_user')
proxy_settings['password'] = agentConfig.get('proxy_password')
log.debug("Proxy Settings: %s:*****@%s:%s", proxy_settings['user'],
proxy_settings['host'], proxy_settings['port'])
return proxy_settings
# If no proxy configuration was specified in datadog.conf
# We try to read it from the system settings
try:
proxy = getproxies().get('https')
if proxy is not None:
parse = urlparse(proxy)
proxy_settings['host'] = parse.hostname
proxy_settings['port'] = int(parse.port)
proxy_settings['user'] = parse.username
proxy_settings['password'] = parse.password
log.debug("Proxy Settings: %s:*****@%s:%s", proxy_settings['user'],
proxy_settings['host'], proxy_settings['port'])
return proxy_settings
except Exception as e:
log.debug("Error while trying to fetch proxy settings using urllib %s."
"Proxy is probably not set", str(e))
return None
| bsd-3-clause | 1,284,470,710,304,016,100 | 33.680556 | 80 | 0.61674 | false |
LinusU/ansible | lib/ansible/cli/vault.py | 52 | 5972 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
import os
import sys
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing import DataLoader
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.utils.display import Display
class VaultCLI(CLI):
""" Vault command line class """
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
def __init__(self, args, display=None):
self.vault_pass = None
super(VaultCLI, self).__init__(args, display)
def parse(self):
self.parser = CLI.base_parser(
vault_opts=True,
usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to self.actions
if self.action == "create":
self.parser.set_usage("usage: %prog create [options] file_name")
elif self.action == "decrypt":
self.parser.set_usage("usage: %prog decrypt [options] file_name")
elif self.action == "edit":
self.parser.set_usage("usage: %prog edit [options] file_name")
elif self.action == "view":
self.parser.set_usage("usage: %prog view [options] file_name")
elif self.action == "encrypt":
self.parser.set_usage("usage: %prog encrypt [options] file_name")
elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
self.options, self.args = self.parser.parse_args()
self.display.verbosity = self.options.verbosity
can_output = ['encrypt', 'decrypt']
if self.action not in can_output:
if self.options.output_file:
raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
if len(self.args) == 0:
raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
else:
# This restriction should remain in place until it's possible to
# load multiple YAML records from a single file, or it's too easy
# to create an encrypted file that can't be read back in. But in
# the meanwhile, "cat a b c|ansible-vault encrypt --output x" is
# a workaround.
if self.options.output_file and len(self.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
else:
self.vault_pass, _ = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
if self.options.new_vault_password_file:
# for rekey only
self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
if not self.vault_pass:
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
self.editor = VaultEditor(self.vault_pass)
self.execute()
def execute_encrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
self.display.display("Reading plaintext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.encrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
self.display.display("Encryption successful", stderr=True)
def execute_decrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
self.display.display("Reading ciphertext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.decrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
self.display.display("Decryption successful", stderr=True)
def execute_create(self):
if len(self.args) > 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(self.args[0])
def execute_edit(self):
for f in self.args:
self.editor.edit_file(f)
def execute_view(self):
for f in self.args:
self.editor.view_file(f)
def execute_rekey(self):
for f in self.args:
if not (os.path.isfile(f)):
raise AnsibleError(f + " does not exist")
if self.new_vault_pass:
new_password = self.new_vault_pass
else:
__, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
for f in self.args:
self.editor.rekey_file(f, new_password)
self.display.display("Rekey successful", stderr=True)
| gpl-3.0 | 8,620,554,293,985,795,000 | 37.282051 | 130 | 0.63714 | false |
knuevena/americorps-backend | orgmember.py | 1 | 4018 | from user import User
from db import Base, Session
from sqlalchemy import *
from sqlalchemy.orm import relation, sessionmaker
from datetime import datetime, date
from attendee import Attendee
from werkzeug.security import generate_password_hash, check_password_hash
from flask import json
from sqlalchemy import exc
from event import Event
import organization
class OrgMember(User):
__tablename__ = "orgmembers"
__mapper_args__ = {'polymorphic_identity': 'orgmember'}
id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=False)
# the OrgMember will have all User fields
org = Column(Integer, ForeignKey('organizations.id'), nullable=False) # object or id?
poc = Column(Boolean, nullable=False)
@classmethod
def fromdict(cls, d):
allowed = ('name', 'email', 'passwordhash', 'phone', 'last_active', 'birthdate',
'bio', 'gender', 'org', 'poc')
df = {k: v for k, v in d.items() if k in allowed}
return cls(**df)
def asdict(self):
dict_ = {}
for key in self.__mapper__.c.keys():
result = getattr(self, key)
if isinstance(result, date):
dict_[key] = str(result)
else:
dict_[key] = result
return dict_
def __init__(self, name, email, passwordhash, phone, poc, org, birthdate=None,
bio=None, gender=None):
self.name = name
self.email = email
self.set_password(passwordhash)
if len(phone) > 15:
raise ValueError("phone number is too long")
elif len(phone) < 10:
raise ValueError("phone number is too short")
elif not phone.isdigit():
raise ValueError("phone number must be a string of digits")
else:
self.phone = phone
self.poc = poc
self.last_active = datetime.now()  # assumes the User column is 'last_active', matching fromdict's allowed list
self.birthdate = birthdate
self.bio = bio
self.gender = gender
self.org = org
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
# create a volunteer from a json blob
def getOrgMember(self, id):
s = Session()
content = s.query(OrgMember).filter_by(id=id).first()
s.close()
if content:
return content
else:
raise ValueError("user does not exist")
def confirmAttendee(self, event, user):
s = Session()
# filter_by takes keyword arguments; assumes Attendee has event/user columns
attendee = s.query(Attendee).filter_by(event=event, user=user).first()
if attendee:
attendee.confirmed = True
s.commit()
s.close()
return True
else:
s.close()
return False
def validateHour(self, event, user):
s = Session()
# filter_by takes keyword arguments; assumes Attendee has event/user columns
attendee = s.query(Attendee).filter_by(event=event, user=user).first()
if attendee:
attendee.hoursValidated = True
s.commit()
s.close()
return True
else:
s.close()
return False
def deleteSelf(self, session):
s = session
try:
s.delete(self)
except Exception:
raise exc.SQLAlchemyError("failed to delete orgMember {}".format(self.id))
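# Module-level helpers: createMember() persists a new OrgMember from a JSON
# dict, then link_org() looks the member up by email and writes its id back
# to the owning Organization's poc field via organization.updateOrg().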
def link_org(orgmember):
s = Session()
o2_org = orgmember.org
org_m = s.query(OrgMember).filter_by(email=orgmember.email).first()
s.close()
if org_m:
org_id = org_m.id
else :
print (exc.InvalidRequestError("query failed"))
return False
json2 = json.dumps({'poc': org_id})
organization.updateOrg(o2_org, json2)
return True
def createMember(json):
o = OrgMember.fromdict(json)
s = Session()
try:
s.add(o)
s.commit()
except Exception:
return False
finally:
s.close()
o2 = OrgMember.fromdict(json)
return link_org(o2)
| mit | -7,297,201,248,992,420,000 | 28.985075 | 90 | 0.583126 | false |
linkedin/indextank-service | api/boto/ec2/__init__.py | 10 | 2038 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
service from AWS.
"""
from boto.ec2.connection import EC2Connection
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the EC2Connection
object's constructor as keyword arguments and they will be
passed along to the EC2Connection object.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
c = EC2Connection(**kw_params)
return c.get_all_regions()
def connect_to_region(region_name, **kw_params):
for region in regions(**kw_params):
if region.name == region_name:
return region.connect(**kw_params)
return None
def get_region(region_name, **kw_params):
for region in regions(**kw_params):
if region.name == region_name:
return region
return None
| apache-2.0 | 5,631,090,319,108,407,000 | 38.192308 | 74 | 0.721786 | false |
apagac/cfme_tests | cfme/tests/infrastructure/test_vm_power_control.py | 1 | 27862 | # -*- coding: utf-8 -*-
import random
import time
import pytest
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.base.login import BaseLoggedInPage
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.rest.gen_data import users as _users
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.long_running,
pytest.mark.tier(2),
pytest.mark.usefixtures('setup_provider'),
test_requirements.power,
pytest.mark.provider([InfraProvider], scope='class'),
]
@pytest.fixture(scope='function')
def vm_name():
return random_vm_name('pwr-c')
@pytest.fixture(scope="function")
def testing_vm(appliance, provider, vm_name):
"""Fixture to provision vm to the provider being tested"""
vm = appliance.collections.infra_vms.instantiate(vm_name, provider)
if not provider.mgmt.does_vm_exist(vm.name):
logger.info("deploying %s on provider %s", vm.name, provider.key)
vm.create_on_provider(allow_skip="default", find_in_cfme=True)
yield vm
vm.cleanup_on_provider()
if_scvmm_refresh_provider(provider)
@pytest.fixture(scope="function")
def archived_vm(testing_vm):
"""Fixture to archive testing VM"""
testing_vm.mgmt.delete()
testing_vm.wait_for_vm_state_change(desired_state='archived', timeout=720,
from_details=False, from_any_provider=True)
@pytest.fixture(scope="function")
def orphaned_vm(provider, testing_vm):
"""Fixture to orphane VM by removing provider from CFME"""
provider.delete_if_exists(cancel=False)
testing_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720,
from_details=False, from_any_provider=True)
@pytest.fixture(scope="function")
def testing_vm_tools(appliance, provider, vm_name, full_template):
"""Fixture to provision vm with preinstalled tools to the provider being tested"""
vm = appliance.collections.infra_vms.instantiate(vm_name, provider, full_template.name)
if not provider.mgmt.does_vm_exist(vm.name):
logger.info("deploying %s on provider %s", vm.name, provider.key)
vm.create_on_provider(allow_skip="default", find_in_cfme=True)
yield vm
vm.cleanup_on_provider()
if_scvmm_refresh_provider(provider)
def if_scvmm_refresh_provider(provider):
# No eventing from SCVMM so force a relationship refresh
if provider.one_of(SCVMMProvider):
provider.refresh_provider_relationships()
def check_power_options(provider, soft_assert, vm, power_state):
must_be_available = {
'on': [vm.POWER_OFF, vm.SUSPEND, vm.RESET],
'off': [vm.POWER_ON]
}
mustnt_be_available = {
'on': [vm.POWER_ON],
'off': [vm.POWER_OFF, vm.SUSPEND, vm.RESET]
}
# VMware and RHEVM have extended power options
if not provider.one_of(SCVMMProvider):
mustnt_be_available['off'].extend([vm.GUEST_RESTART, vm.GUEST_SHUTDOWN])
if not provider.one_of(SCVMMProvider, RHEVMProvider):
mustnt_be_available['on'].extend([vm.GUEST_RESTART, vm.GUEST_SHUTDOWN])
if provider.one_of(RHEVMProvider):
must_be_available['on'].remove(vm.RESET)
view = navigate_to(vm, 'Details')
power_dropdown = view.toolbar.power
for pwr_option in must_be_available[power_state]:
soft_assert(power_dropdown.item_enabled(pwr_option),
"'{}' must be available in current power state - '{}' ".format(pwr_option,
power_state))
for pwr_option in mustnt_be_available[power_state]:
pwr_state = power_dropdown.has_item(pwr_option) and power_dropdown.item_enabled(pwr_option)
soft_assert(not pwr_state,
"'{}' must not be available in current power state - '{}' ".format(pwr_option,
power_state))
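# wait_for_last_boot_timestamp_refresh() polls the Power Management summary
# with page reloads because the boot timestamp lags the state change, while
# ensure_state_changed_on_unchanged() does a single read to confirm the
# "State Changed On" value did not move.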
def wait_for_last_boot_timestamp_refresh(vm, boot_time, timeout=300):
"""Timestamp update doesn't happen with state change so need a longer
wait when expecting a last boot timestamp change"""
view = navigate_to(vm, "Details")
def _wait_for_timestamp_refresh():
cur_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
return boot_time != cur_boot_time
try:
wait_for(_wait_for_timestamp_refresh, num_sec=timeout, delay=30,
fail_func=view.toolbar.reload.click)
return True
except TimedOutError:
return False
def ensure_state_changed_on_unchanged(vm, state_changed_on):
"""Returns True if current value of State Changed On in the Power Management
is the same as the supplied (original) value."""
view = navigate_to(vm, "Details")
new_state_changed_on = view.entities.summary("Power Management").get_text_of("State Changed On")
return state_changed_on == new_state_changed_on
def wait_for_vm_tools(vm, timeout=300):
"""Sometimes test opens VM details before it gets loaded and can't verify if vmtools are
installed"""
view = navigate_to(vm, "Details")
def _wait_for_tools_ok():
return view.entities.summary("Properties").get_text_of("Platform Tools") == 'toolsOk'
try:
wait_for(_wait_for_tools_ok, num_sec=timeout, delay=10, fail_func=view.toolbar.reload.click)
return True
except TimedOutError:
return False
class TestControlOnQuadicons(object):
@pytest.mark.rhv3
def test_power_off_cancel(self, testing_vm, ensure_vm_running, soft_assert):
"""Tests power off cancel
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/10h
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=True)
if_scvmm_refresh_provider(testing_vm.provider)
# TODO: assert no event.
time.sleep(60)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'on')
soft_assert(
testing_vm.mgmt.is_running, "vm not running")
@pytest.mark.rhv1
def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert):
"""Tests power off
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=False)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(text='Stop initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=900)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'off')
soft_assert(not testing_vm.mgmt.is_running, "vm running")
@pytest.mark.rhv3
def test_power_on_cancel(self, testing_vm, ensure_vm_stopped, soft_assert):
"""Tests power on cancel
Polarion:
assignee: ghubale
initialEstimate: 1/4h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=True)
if_scvmm_refresh_provider(testing_vm.provider)
time.sleep(60)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'off')
soft_assert(not testing_vm.mgmt.is_running, "vm running")
@pytest.mark.rhv1
@pytest.mark.tier(1)
def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert):
"""Tests power on
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(text='Start initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=900)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'on')
soft_assert(testing_vm.mgmt.is_running, "vm not running")
class TestVmDetailsPowerControlPerProvider(object):
@pytest.mark.rhv3
def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert):
"""Tests power off
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
view = navigate_to(testing_vm, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=False,
from_details=True)
view.flash.assert_success_message(text='Stop initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
soft_assert(not testing_vm.mgmt.is_running, "vm running")
# BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
if not testing_vm.provider.one_of(RHEVMProvider):
new_last_boot_time = view.entities.summary("Power Management").get_text_of(
"Last Boot Time")
soft_assert(new_last_boot_time == last_boot_time,
"ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
@pytest.mark.rhv3
def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert):
"""Tests power on
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
from_details=True)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(text='Start initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
soft_assert(testing_vm.mgmt.is_running, "vm not running")
@pytest.mark.rhv3
@pytest.mark.meta(automates=[BZ(1174858)])
def test_suspend(self, appliance, testing_vm, ensure_vm_running, soft_assert):
"""Tests suspend
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
Bugzilla:
1174858
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
view = navigate_to(testing_vm, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND,
cancel=False,
from_details=True)
view.flash.assert_success_message(text='Suspend initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_SUSPENDED,
timeout=450,
from_details=True)
soft_assert(testing_vm.mgmt.is_suspended, "vm not suspended")
if not testing_vm.provider.one_of(RHEVMProvider):
new_last_boot_time = view.entities.summary("Power Management").get_text_of(
"Last Boot Time")
soft_assert(new_last_boot_time == last_boot_time,
"ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
@pytest.mark.rhv1
def test_start_from_suspend(self, appliance, testing_vm, ensure_vm_suspended, soft_assert):
"""Tests start from suspend
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
try:
testing_vm.provider.refresh_provider_relationships()
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
except TimedOutError:
if testing_vm.provider.one_of(RHEVMProvider):
logger.warning('working around bz1174858, ignoring timeout')
else:
raise
view = navigate_to(testing_vm, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
from_details=True)
view.flash.assert_success_message(text='Start initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
wait_for_last_boot_timestamp_refresh(testing_vm, last_boot_time, timeout=600)
soft_assert(testing_vm.mgmt.is_running, "vm not running")
@pytest.mark.rhv3
def test_no_template_power_control(provider, soft_assert):
""" Ensures that no power button is displayed for templates.
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/10h
setup:
1. An infra provider that has some templates.
testSteps:
1. Open the view of all templates of the provider
2. Verify the Power toolbar button is not visible
3. Select some template using the checkbox
4. Verify the Power toolbar button is not visible
5. Click on some template to get into the details page
6. Verify the Power toolbar button is not visible
Bugzilla:
1496383
1634713
"""
view = navigate_to(provider, 'ProviderTemplates')
view.toolbar.view_selector.select('Grid View')
soft_assert(not view.toolbar.power.is_displayed, "Power displayed in template grid view!")
# Ensure selecting a template doesn't cause power menu to appear
templates = view.entities.all_entity_names
template_name = random.choice(templates)
selected_template = provider.appliance.collections.infra_templates.instantiate(template_name,
provider)
    # Check the power button by checking the quadicon
view = navigate_to(selected_template, 'AllForProvider', use_resetter=False)
entity = view.entities.get_entity(name=selected_template.name, surf_pages=True)
entity.check()
for action in view.toolbar.power.items:
# Performing power actions on template
view.toolbar.power.item_select(action, handle_alert=True)
if action == 'Power On':
action = 'Start'
elif action == 'Power Off':
action = 'Stop'
view.flash.assert_message('{} action does not apply to selected items'.format(action))
view.flash.dismiss()
# Ensure there isn't a power button on the details page
entity.click()
soft_assert(not view.toolbar.power.is_displayed, "Power displayed in template details!")
@pytest.mark.rhv3
@pytest.mark.meta(
blockers=[
BZ(
1723805,
unblock=lambda provider: not provider.one_of(SCVMMProvider),
)
]
)
def test_no_power_controls_on_archived_vm(appliance, testing_vm, archived_vm, soft_assert):
""" Ensures that no power button is displayed from details view of archived vm
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/10h
setup:
1. Archived VM should be available
testSteps:
1. Open the view of VM Details
2. Verify the Power toolbar button is not visible
Bugzilla:
1520489
1659340
"""
view = navigate_to(testing_vm, 'AnyProviderDetails', use_resetter=False)
    assert not view.toolbar.power.is_enabled, "Power displayed in archived VM's details!"
@pytest.mark.rhv3
def test_archived_vm_status(testing_vm, archived_vm):
"""Tests archived vm status
Metadata:
test_flag: inventory
Polarion:
assignee: ghubale
casecomponent: Infra
caseimportance: high
initialEstimate: 1/8h
tags: power
"""
vm_state = testing_vm.find_quadicon(from_any_provider=True).data['state']
assert (vm_state == 'archived')
@pytest.mark.rhv3
def test_orphaned_vm_status(testing_vm, orphaned_vm):
"""Tests orphaned vm status
Polarion:
assignee: ghubale
initialEstimate: 1/10h
casecomponent: Infra
tags: power
"""
vm_state = testing_vm.find_quadicon(from_any_provider=True).data['state']
assert (vm_state == 'orphaned')
@pytest.mark.rhv1
def test_vm_power_options_from_on(provider, soft_assert, testing_vm, ensure_vm_running):
"""Tests vm power options from on
Metadata:
test_flag: power_control
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/4h
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
check_power_options(provider, soft_assert, testing_vm, testing_vm.STATE_ON)
@pytest.mark.rhv3
@pytest.mark.meta(automates=[BZ(1724062)])
def test_vm_power_options_from_off(provider, soft_assert, testing_vm, ensure_vm_stopped):
"""Tests vm power options from off
Metadata:
test_flag: power_control
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/4h
Bugzilla:
1724062
"""
# TODO([email protected]): Update this test case with power options(shutdown and restart guest)
# for scvmm provider
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
check_power_options(provider, soft_assert, testing_vm, testing_vm.STATE_OFF)
@pytest.mark.provider([VMwareProvider, RHEVMProvider], override=True, scope='function')
@pytest.mark.meta(automates=[1571830, 1650506])
def test_guest_os_reset(appliance, provider, testing_vm_tools, ensure_vm_running, soft_assert):
"""Tests vm guest os reset
Metadata:
test_flag: power_control
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
tags: power
Bugzilla:
1571830
1650506
"""
    # TODO([email protected]): Update this test case for the power operation (restart guest) for SCVMM
wait_for_vm_tools(testing_vm_tools)
view = navigate_to(testing_vm_tools, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
state_changed_on = view.entities.summary("Power Management").get_text_of("State Changed On")
testing_vm_tools.power_control_from_cfme(
option=testing_vm_tools.GUEST_RESTART, cancel=False, from_details=True)
view.flash.assert_success_message(text='Restart Guest initiated', partial=True)
if not (provider.one_of(RHEVMProvider) and BZ(1571830, forced_streams=["5.10", "5.11"]).blocks):
soft_assert(
wait_for_last_boot_timestamp_refresh(testing_vm_tools, last_boot_time),
"Last Boot Time value has not been refreshed",
)
soft_assert(
ensure_state_changed_on_unchanged(testing_vm_tools, state_changed_on),
"Value of 'State Changed On' has changed after guest restart",
)
soft_assert(testing_vm_tools.mgmt.is_running, "vm not running")
@pytest.mark.meta(automates=[1723485, 1571895, 1650506])
@pytest.mark.provider([VMwareProvider, RHEVMProvider], override=True)
@pytest.mark.meta(blockers=[BZ(1723485, forced_streams=["5.11"],
unblock=lambda provider: not (provider.one_of(RHEVMProvider)
and not provider.version < 4.3))])
def test_guest_os_shutdown(appliance, provider, testing_vm_tools, ensure_vm_running, soft_assert):
"""Tests vm guest os reset
Polarion:
assignee: ghubale
initialEstimate: 1/6h
caseimportance: high
casecomponent: Infra
tags: power
Bugzilla:
1723485
1571895
1650506
"""
# TODO([email protected]): Update this test case for power operation(shutdown guest) for scvmm
testing_vm_tools.wait_for_vm_state_change(
desired_state=testing_vm_tools.STATE_ON, timeout=720, from_details=True)
wait_for_vm_tools(testing_vm_tools)
view = navigate_to(testing_vm_tools, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm_tools.power_control_from_cfme(
option=testing_vm_tools.GUEST_SHUTDOWN, cancel=False, from_details=True)
view.flash.assert_success_message(text='Shutdown Guest initiated', partial=True)
testing_vm_tools.wait_for_vm_state_change(
desired_state=testing_vm_tools.STATE_OFF, timeout=720, from_details=True)
    soft_assert(not testing_vm_tools.mgmt.is_running, "vm running")
    # Blocking this assertion for RHEV providers because BZ 1571895 is not fixed yet
if not (BZ(1571895, forced_streams=["5.10", "5.11"]).blocks and provider.one_of(RHEVMProvider)):
new_last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
soft_assert(new_last_boot_time == last_boot_time,
"ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
@pytest.fixture(scope="function")
def new_user(request, appliance):
user, user_data = _users(request, appliance, group="EvmGroup-vm_user")
yield appliance.collections.users.instantiate(
name=user[0].name,
credential=Credential(principal=user_data[0]["userid"], secret=user_data[0]["password"]),
)
if user[0].exists:
user[0].action.delete()
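# Note on the fixture above (descriptive, inferred from how it is used below):
# entering `with new_user:` re-logs the appliance in as that user for the duration
# of the block and restores the previous session on exit; `_users` is assumed to
# return the created user records plus the raw credential data used to build
# `Credential`.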
@pytest.mark.tier(1)
@pytest.mark.meta(automates=[1687597])
@pytest.mark.provider([VMwareProvider], selector=ONE_PER_TYPE, override=True)
def test_retire_vm_with_vm_user_role(new_user, appliance, testing_vm):
"""
Bugzilla:
1687597
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseposneg: positive
startsin: 5.10
casecomponent: Automate
setup:
1. Provision vm
testSteps:
1. Create custom user with 'EvmRole_vm-user' role
2. Retire VM by log-in to custom user
"""
# Log in with new user to retire the vm
with new_user:
view = navigate_to(testing_vm.parent, "All")
view.entities.get_entity(name=testing_vm.name, surf_pages=True).check()
assert view.toolbar.lifecycle.item_enabled("Retire selected items")
testing_vm.retire()
assert testing_vm.wait_for_vm_state_change(desired_state="retired", timeout=720,
from_details=True)
@pytest.fixture(params=['archived', 'orphaned'])
def archive_orphan_vm(request, provider, testing_vm):
"""This fixture is used to create archived or orphaned VM"""
if request.param == "archived":
# Archive VM by retiring it
testing_vm.mgmt.delete()
testing_vm.wait_for_vm_state_change(desired_state='archived', timeout=720,
from_details=False, from_any_provider=True)
else:
# Orphan VM by removing provider from CFME
provider.delete_if_exists(cancel=False)
testing_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720,
from_details=False, from_any_provider=True)
yield request.param, testing_vm
@pytest.mark.meta(automates=[1655477, 1686015])
def test_power_options_on_archived_orphaned_vms_all_page(appliance, archive_orphan_vm):
"""This test case is to check Power option drop-down button is disabled on archived and orphaned
VMs all page. Also it performs the power operations on vm and checked expected flash messages.
Bugzilla:
1655477
1686015
Polarion:
assignee: ghubale
initialEstimate: 1/2h
caseimportance: low
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: Control
tags: power
testSteps:
1. Add infrastructure provider
2. Navigate to Archived or orphaned VMs all page
3. Select any VM and click on power option drop-down
"""
infra_vms = appliance.collections.infra_vms
state, testing_vm = archive_orphan_vm
if state == "archived":
view = navigate_to(infra_vms, 'ArchivedAll')
# Selecting particular archived vm
testing_vm.find_quadicon(from_archived_all=True).check()
else:
view = navigate_to(infra_vms, 'OrphanedAll')
# Selecting particular orphaned vm
testing_vm.find_quadicon(from_orphaned_all=True).check()
    # After selecting a particular archived/orphaned VM, the 'Power' drop-down gets enabled.
    # Iterate over all the options available in the 'Power' drop-down.
for action in view.toolbar.power.items:
# Performing power actions on archived/orphaned vm
view.toolbar.power.item_select(action, handle_alert=True)
if action == 'Power On':
action = 'Start'
elif action == 'Power Off':
action = 'Stop'
view.flash.assert_message(f'{action} action does not apply to selected items')
view.flash.dismiss()
| gpl-2.0 | -5,335,879,599,311,816,000 | 37.643551 | 100 | 0.642524 | false |
NeovaHealth/odoo | addons/stock_account/wizard/stock_return_picking.py | 342 | 2715 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_return_picking(osv.osv_memory):
_inherit = 'stock.return.picking'
_columns = {
        'invoice_state': fields.selection([('2binvoiced', 'To be refunded/invoiced'), ('none', 'No invoicing')], 'Invoicing', required=True),
}
def default_get(self, cr, uid, fields, context=None):
res = super(stock_return_picking, self).default_get(cr, uid, fields, context=context)
record_id = context and context.get('active_id', False) or False
pick_obj = self.pool.get('stock.picking')
pick = pick_obj.browse(cr, uid, record_id, context=context)
if pick:
if 'invoice_state' in fields:
                if pick.invoice_state == 'invoiced':
res.update({'invoice_state': '2binvoiced'})
else:
res.update({'invoice_state': 'none'})
return res
def _create_returns(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.browse(cr, uid, ids[0], context=context)
new_picking, picking_type_id = super(stock_return_picking, self)._create_returns(cr, uid, ids, context=context)
if data.invoice_state == '2binvoiced':
pick_obj = self.pool.get("stock.picking")
move_obj = self.pool.get("stock.move")
move_ids = [x.id for x in pick_obj.browse(cr, uid, new_picking, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'invoice_state': '2binvoiced'})
return new_picking, picking_type_id
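# Illustrative old-API flow from inside another model method (ids, cr/uid and
# context values are assumed, not taken from this file):
#
#     wiz_obj = self.pool.get('stock.return.picking')
#     wiz_id = wiz_obj.create(cr, uid, {'invoice_state': '2binvoiced'},
#                             context={'active_id': picking_id})
#     wiz_obj._create_returns(cr, uid, [wiz_id], context={'active_id': picking_id})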
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,767,616,101,702,959,000 | 44.25 | 140 | 0.610313 | false |
aperigault/ansible | lib/ansible/modules/storage/netapp/na_ontap_cluster.py | 26 | 10668 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_cluster
short_description: NetApp ONTAP cluster - create, join, add license
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Create or join or apply licenses to ONTAP clusters
- Cluster join can be performed using only one of the parameters, either cluster_name or cluster_ip_address
options:
state:
description:
- Whether the specified cluster should exist or not.
choices: ['present']
default: present
cluster_name:
description:
- The name of the cluster to manage.
cluster_ip_address:
description:
- IP address of cluster to be joined
license_code:
description:
- License code to be applied to the cluster
license_package:
description:
- License package name of the license to be removed
node_serial_number:
description:
- Serial number of the cluster node
'''
EXAMPLES = """
- name: Create cluster
na_ontap_cluster:
state: present
cluster_name: new_cluster
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Add license from cluster
na_ontap_cluster:
state: present
cluster_name: FPaaS-A300-01
license_code: SGHLQDBBVAAAAAAAAAAAAAAAAAAA
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Join cluster
na_ontap_cluster:
state: present
cluster_ip_address: 10.61.184.181
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Join cluster
na_ontap_cluster:
state: present
cluster_name: FPaaS-A300-01
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
def local_cmp(a, b):
"""
    compares the two dicts by value only; keys are expected to be identical in both
    :param a: dict 1
    :param b: dict 2
    :return: number of keys whose values differ between the two dicts
"""
diff = [key for key in a if a[key] != b[key]]
return len(diff)
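# Minimal sketch of local_cmp behaviour (illustrative licensing dicts, not from a
# live cluster): a non-zero result means at least one package's status changed.
#
#     local_cmp({'nfs': 'license', 'cifs': 'none'},
#               {'nfs': 'none', 'cifs': 'none'})      # -> 1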
class NetAppONTAPCluster(object):
"""
object initialize and class methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present'], default='present'),
cluster_name=dict(required=False, type='str'),
cluster_ip_address=dict(required=False, type='str'),
license_code=dict(required=False, type='str'),
license_package=dict(required=False, type='str'),
node_serial_number=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
required_together=[
['license_package', 'node_serial_number']
]
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_licensing_status(self):
"""
Check licensing status
:return: package (key) and licensing status (value)
:rtype: dict
"""
license_status = netapp_utils.zapi.NaElement(
'license-v2-status-list-info')
try:
result = self.server.invoke_successfully(license_status,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error checking license status: %s" %
to_native(error), exception=traceback.format_exc())
return_dictionary = {}
license_v2_status = result.get_child_by_name('license-v2-status')
if license_v2_status:
for license_v2_status_info in license_v2_status.get_children():
package = license_v2_status_info.get_child_content('package')
status = license_v2_status_info.get_child_content('method')
return_dictionary[package] = status
return return_dictionary
def create_cluster(self):
"""
Create a cluster
"""
cluster_create = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-create', **{'cluster-name': self.parameters['cluster_name']})
try:
self.server.invoke_successfully(cluster_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
# Error 36503 denotes node already being used.
if to_native(error.code) == "36503":
return False
else:
self.module.fail_json(msg='Error creating cluster %s: %s'
% (self.parameters['cluster_name'], to_native(error)),
exception=traceback.format_exc())
return True
def cluster_join(self):
"""
Add a node to an existing cluster
"""
if self.parameters.get('cluster_ip_address') is not None:
cluster_add_node = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-join', **{'cluster-ip-address': self.parameters['cluster_ip_address']})
for_fail_attribute = self.parameters.get('cluster_ip_address')
elif self.parameters.get('cluster_name') is not None:
cluster_add_node = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-join', **{'cluster-name': self.parameters['cluster_name']})
for_fail_attribute = self.parameters.get('cluster_name')
else:
return False
try:
self.server.invoke_successfully(cluster_add_node, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
# Error 36503 denotes node already being used.
if to_native(error.code) == "36503":
return False
else:
self.module.fail_json(msg='Error adding node to cluster %s: %s'
% (for_fail_attribute, to_native(error)),
exception=traceback.format_exc())
return True
def license_v2_add(self):
"""
Apply a license to cluster
"""
license_add = netapp_utils.zapi.NaElement.create_node_with_children('license-v2-add')
license_add.add_node_with_children('codes', **{'license-code-v2': self.parameters['license_code']})
try:
self.server.invoke_successfully(license_add, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error adding license %s: %s'
% (self.parameters['license_code'], to_native(error)),
exception=traceback.format_exc())
def license_v2_delete(self):
"""
Delete license from cluster
"""
license_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'license-v2-delete', **{'package': self.parameters['license_package'],
'serial-number': self.parameters['node_serial_number']})
try:
self.server.invoke_successfully(license_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting license : %s' % (to_native(error)),
exception=traceback.format_exc())
def autosupport_log(self):
"""
Autosupport log for cluster
:return:
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_cluster", cserver)
def apply(self):
"""
Apply action to cluster
"""
property_changed = False
create_flag = False
join_flag = False
self.autosupport_log()
license_status = self.get_licensing_status()
if self.module.check_mode:
pass
else:
if self.parameters.get('state') == 'present':
if self.parameters.get('cluster_name') is not None:
create_flag = self.create_cluster()
if not create_flag:
join_flag = self.cluster_join()
if self.parameters.get('license_code') is not None:
self.license_v2_add()
property_changed = True
if self.parameters.get('license_package') is not None and\
self.parameters.get('node_serial_number') is not None:
if license_status.get(str(self.parameters.get('license_package')).lower()) != 'none':
self.license_v2_delete()
property_changed = True
if property_changed:
new_license_status = self.get_licensing_status()
if local_cmp(license_status, new_license_status) == 0:
property_changed = False
changed = property_changed or create_flag or join_flag
self.module.exit_json(changed=changed)
def main():
"""
Create object and call apply
"""
cluster_obj = NetAppONTAPCluster()
cluster_obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,072,040,109,049,848,000 | 36.170732 | 107 | 0.585677 | false |
linked67/p2pool-lire | wstools/TimeoutSocket.py | 293 | 5293 | """Based on code from timeout_socket.py, with some tweaks for compatibility.
These tweaks should really be rolled back into timeout_socket, but it's
not totally clear who is maintaining it at this point. In the meantime,
we'll use a different module name for our tweaked version to avoid any
confusion.
The original timeout_socket is by:
Scott Cotton <[email protected]>
Lloyd Zusman <[email protected]>
Phil Mayes <[email protected]>
Piers Lauder <[email protected]>
Radovan Garabik <[email protected]>
"""
ident = "$Id$"
import string, socket, select, errno
WSAEINVAL = getattr(errno, 'WSAEINVAL', 10022)
class TimeoutSocket:
"""A socket imposter that supports timeout limits."""
def __init__(self, timeout=20, sock=None):
self.timeout = float(timeout)
self.inbuf = ''
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock = sock
self.sock.setblocking(0)
self._rbuf = ''
self._wbuf = ''
def __getattr__(self, name):
# Delegate to real socket attributes.
return getattr(self.sock, name)
def connect(self, *addr):
timeout = self.timeout
sock = self.sock
try:
# Non-blocking mode
sock.setblocking(0)
apply(sock.connect, addr)
sock.setblocking(timeout != 0)
return 1
except socket.error,why:
if not timeout:
raise
sock.setblocking(1)
if len(why.args) == 1:
code = 0
else:
code, why = why
if code not in (
errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK
):
raise
r,w,e = select.select([],[sock],[],timeout)
if w:
try:
apply(sock.connect, addr)
return 1
except socket.error,why:
if len(why.args) == 1:
code = 0
else:
code, why = why
if code in (errno.EISCONN, WSAEINVAL):
return 1
raise
raise TimeoutError('socket connect() timeout.')
def send(self, data, flags=0):
total = len(data)
next = 0
while 1:
r, w, e = select.select([],[self.sock], [], self.timeout)
if w:
buff = data[next:next + 8192]
sent = self.sock.send(buff, flags)
next = next + sent
if next == total:
return total
continue
raise TimeoutError('socket send() timeout.')
def recv(self, amt, flags=0):
if select.select([self.sock], [], [], self.timeout)[0]:
return self.sock.recv(amt, flags)
raise TimeoutError('socket recv() timeout.')
buffsize = 4096
handles = 1
def makefile(self, mode="r", buffsize=-1):
self.handles = self.handles + 1
self.mode = mode
return self
def close(self):
self.handles = self.handles - 1
if self.handles == 0 and self.sock.fileno() >= 0:
self.sock.close()
def read(self, n=-1):
if not isinstance(n, type(1)):
n = -1
if n >= 0:
k = len(self._rbuf)
if n <= k:
data = self._rbuf[:n]
self._rbuf = self._rbuf[n:]
return data
n = n - k
L = [self._rbuf]
self._rbuf = ""
while n > 0:
new = self.recv(max(n, self.buffsize))
if not new: break
k = len(new)
if k > n:
L.append(new[:n])
self._rbuf = new[n:]
break
L.append(new)
n = n - k
return "".join(L)
k = max(4096, self.buffsize)
L = [self._rbuf]
self._rbuf = ""
while 1:
new = self.recv(k)
if not new: break
L.append(new)
k = min(k*2, 1024**2)
return "".join(L)
def readline(self, limit=-1):
data = ""
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self.recv(self.buffsize)
if not new: break
i = new.find('\n')
if i >= 0: i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0: i = len(self._rbuf)
else: i = i+1
if 0 <= limit < len(self._rbuf): i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self, sizehint = 0):
total = 0
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
def writelines(self, list):
self.send(''.join(list))
def write(self, data):
self.send(data)
def flush(self):
pass
class TimeoutError(Exception):
pass
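# Illustrative usage sketch (host and port are assumed, not part of this module):
#
#     sock = TimeoutSocket(timeout=10)
#     try:
#         sock.connect(('example.org', 80))
#     except TimeoutError:
#         pass # connect() did not complete within the 10 second limit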
| gpl-3.0 | -547,586,595,110,253,060 | 28.569832 | 76 | 0.481579 | false |
sugartom/tensorflow-alien | tensorflow/contrib/bayesflow/__init__.py | 57 | 1871 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.contrib.bayesflow.python.ops import entropy
from tensorflow.contrib.bayesflow.python.ops import monte_carlo
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
from tensorflow.contrib.bayesflow.python.ops import variational_inference
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['entropy', 'monte_carlo',
'special_math', 'stochastic_gradient_estimators',
'stochastic_graph', 'stochastic_tensor',
'stochastic_variables', 'variational_inference']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 1,181,532,653,937,965,600 | 44.634146 | 82 | 0.737573 | false |
NEricN/RobotCSimulator | Python/App/Lib/site-packages/pip/backwardcompat/__init__.py | 394 | 3756 | """Stuff that differs in different Python versions and platform
distributions."""
import os
import imp
import sys
import site
__all__ = ['WindowsError']
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
"""this exception should never be raised"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = NeverUsedException
try:
#new in Python 3.3
PermissionError = PermissionError
except NameError:
PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
from io import StringIO, BytesIO
from functools import reduce
from urllib.error import URLError, HTTPError
from queue import Queue, Empty
from urllib.request import url2pathname, urlretrieve, pathname2url
from email import message as emailmessage
import urllib.parse as urllib
import urllib.request as urllib2
import configparser as ConfigParser
import xmlrpc.client as xmlrpclib
import urllib.parse as urlparse
import http.client as httplib
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
def console_to_str(s):
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def get_http_message_param(http_message, param, default_value):
return http_message.get_param(param, default_value)
bytes = bytes
string_types = (str,)
raw_input = input
else:
from cStringIO import StringIO
from urllib2 import URLError, HTTPError
from Queue import Queue, Empty
from urllib import url2pathname, urlretrieve, pathname2url
from email import Message as emailmessage
import urllib
import urllib2
import urlparse
import ConfigParser
import xmlrpclib
import httplib
def b(s):
return s
def u(s):
return s
def console_to_str(s):
return s
def get_http_message_param(http_message, param, default_value):
result = http_message.getparam(param)
return result or default_value
bytes = str
string_types = (basestring,)
reduce = reduce
cmp = cmp
raw_input = raw_input
BytesIO = StringIO
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def get_path_uid(path):
"""
Return path's uid.
Does not follow symlinks: https://github.com/pypa/pip/pull/935#discussion_r5307003
    Placed this function in backwardcompat due to differences on AIX and Jython,
    which should eventually go away.
:raises OSError: When path is a symlink or can't be read.
"""
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
        # WARNING: time-of-check vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError("%s is a symlink; Will not return uid for symlinks" % path)
return file_uid
| apache-2.0 | -3,374,340,698,131,972,000 | 26.217391 | 86 | 0.657881 | false |
localu/metagoofil | hachoir_parser/container/swf.py | 84 | 16477 | """
SWF (Macromedia/Adobe Flash) file parser.
Documentation:
- Alexis' SWF Reference:
http://www.m2osw.com/swf_alexref.html
- http://www.half-serious.com/swf/format/
- http://www.anotherbigidea.com/javaswf/
- http://www.gnu.org/software/gnash/
Author: Victor Stinner
Creation date: 29 october 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, UInt8, UInt16, Int32, UInt32, Int64, CString, Enum,
Bytes, RawBytes, NullBits, String, SubFile)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.text_handler import textHandler, filesizeHandler
from hachoir_core.tools import paddingSize, humanFrequency
from hachoir_parser.image.common import RGB
from hachoir_parser.image.jpeg import JpegChunk, JpegFile
from hachoir_core.stream import StringInputStream, ConcatStream
from hachoir_parser.common.deflate import Deflate, has_deflate
from hachoir_parser.container.action_script import parseActionScript, parseABC
import math
# Maximum file size (50 MB)
MAX_FILE_SIZE = 50 * 1024 * 1024
TWIPS = 20
class RECT(FieldSet):
endian = BIG_ENDIAN
def createFields(self):
yield Bits(self, "nbits", 5)
nbits = self["nbits"].value
if not nbits:
raise ParserError("SWF parser: Invalid RECT field size (0)")
yield Bits(self, "xmin", nbits, "X minimum in twips")
yield Bits(self, "xmax", nbits, "X maximum in twips")
yield Bits(self, "ymin", nbits, "Y minimum in twips")
yield Bits(self, "ymax", nbits, "Y maximum in twips")
size = paddingSize(self.current_size, 8)
if size:
yield NullBits(self, "padding", size)
def getWidth(self):
return math.ceil(float(self["xmax"].value) / TWIPS)
def getHeight(self):
return math.ceil(float(self["ymax"].value) / TWIPS)
def createDescription(self):
return "Rectangle: %ux%u" % (self.getWidth(), self.getHeight())
class FixedFloat16(FieldSet):
def createFields(self):
yield UInt8(self, "float_part")
yield UInt8(self, "int_part")
def createValue(self):
return self["int_part"].value + float(self["float_part"].value) / 256
def parseBackgroundColor(parent, size):
yield RGB(parent, "color")
def bit2hertz(field):
return humanFrequency(5512.5 * (2 ** field.value))
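# The two sample-rate bits map to 5512.5 Hz * 2**n for n in 0..3, i.e. roughly
# 5.5 kHz, 11.0 kHz, 22.1 kHz and 44.1 kHz.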
SOUND_CODEC_MP3 = 2
SOUND_CODEC = {
0: "RAW",
1: "ADPCM",
SOUND_CODEC_MP3: "MP3",
3: "Uncompressed",
6: "Nellymoser",
}
class SoundEnvelope(FieldSet):
def createFields(self):
yield UInt8(self, "count")
for index in xrange(self["count"].value):
yield UInt32(self, "mark44[]")
yield UInt16(self, "level0[]")
yield UInt16(self, "level1[]")
def parseSoundBlock(parent, size):
# TODO: Be able to get codec... Need to know last sound "def_sound[]" field
# if not (...)sound_header:
# raise ParserError("Sound block without header")
if True: #sound_header == SOUND_CODEC_MP3:
yield UInt16(parent, "samples")
yield UInt16(parent, "left")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "music_data", size)
def parseStartSound(parent, size):
yield UInt16(parent, "sound_id")
yield Bit(parent, "has_in_point")
yield Bit(parent, "has_out_point")
yield Bit(parent, "has_loops")
yield Bit(parent, "has_envelope")
yield Bit(parent, "no_multiple")
yield Bit(parent, "stop_playback")
yield NullBits(parent, "reserved", 2)
if parent["has_in_point"].value:
yield UInt32(parent, "in_point")
if parent["has_out_point"].value:
yield UInt32(parent, "out_point")
if parent["has_loops"].value:
yield UInt16(parent, "loop_count")
if parent["has_envelope"].value:
yield SoundEnvelope(parent, "envelope")
def parseDefineSound(parent, size):
yield UInt16(parent, "sound_id")
yield Bit(parent, "is_stereo")
yield Bit(parent, "is_16bit")
yield textHandler(Bits(parent, "rate", 2), bit2hertz)
yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
yield UInt32(parent, "sample_count")
if parent["codec"].value == SOUND_CODEC_MP3:
yield UInt16(parent, "len")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "music_data", size)
def parseSoundHeader(parent, size):
yield Bit(parent, "playback_is_stereo")
yield Bit(parent, "playback_is_16bit")
yield textHandler(Bits(parent, "playback_rate", 2), bit2hertz)
yield NullBits(parent, "reserved", 4)
yield Bit(parent, "sound_is_stereo")
yield Bit(parent, "sound_is_16bit")
yield textHandler(Bits(parent, "sound_rate", 2), bit2hertz)
yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
yield UInt16(parent, "sample_count")
if parent["codec"].value == 2:
yield UInt16(parent, "latency_seek")
class JpegHeader(FieldSet):
endian = BIG_ENDIAN
def createFields(self):
count = 1
while True:
chunk = JpegChunk(self, "jpeg_chunk[]")
yield chunk
if 1 < count and chunk["type"].value in (JpegChunk.TAG_SOI, JpegChunk.TAG_EOI):
break
count += 1
def parseJpeg(parent, size):
yield UInt16(parent, "char_id", "Character identifier")
size -= 2
code = parent["code"].value
if code != Tag.TAG_BITS:
if code == Tag.TAG_BITS_JPEG3:
yield UInt32(parent, "alpha_offset", "Character identifier")
size -= 4
addr = parent.absolute_address + parent.current_size + 16
if parent.stream.readBytes(addr, 2) in ("\xff\xdb", "\xff\xd8"):
header = JpegHeader(parent, "jpeg_header")
yield header
hdr_size = header.size // 8
size -= hdr_size
else:
hdr_size = 0
if code == Tag.TAG_BITS_JPEG3:
img_size = parent["alpha_offset"].value - hdr_size
else:
img_size = size
else:
img_size = size
yield SubFile(parent, "image", img_size, "JPEG picture", parser=JpegFile)
if code == Tag.TAG_BITS_JPEG3:
size = (parent.size - parent.current_size) // 8
yield RawBytes(parent, "alpha", size, "Image data")
def parseVideoFrame(parent, size):
yield UInt16(parent, "stream_id")
yield UInt16(parent, "frame_num")
if 4 < size:
yield RawBytes(parent, "video_data", size-4)
class Export(FieldSet):
def createFields(self):
yield UInt16(self, "object_id")
yield CString(self, "name")
def parseExport(parent, size):
yield UInt16(parent, "count")
for index in xrange(parent["count"].value):
yield Export(parent, "export[]")
def parseProductInfo(parent, size):
yield Int32(parent, "product_id")
yield Int32(parent, "edition")
yield UInt8(parent, "major_version")
yield UInt8(parent, "minor_version")
yield Int64(parent, "build_number")
yield Int64(parent, "compilation_date")
def parseScriptLimits(parent, size):
yield UInt16(parent, "max_recursion_limit")
yield UInt16(parent, "timeout_seconds", "Seconds of processing until the SWF is considered 'stuck'")
def parseSymbolClass(parent, size):
yield UInt16(parent, "count")
for index in xrange(parent["count"].value):
yield UInt16(parent, "symbol_id[]")
yield CString(parent, "symbol_name[]")
def parseBinaryData(parent, size):
yield UInt16(parent, "data_id")
yield UInt32(parent, "reserved")
if size > 6:
yield RawBytes(parent, "data", size-6)
class Tag(FieldSet):
TAG_BITS = 6
TAG_BITS_JPEG2 = 32
TAG_BITS_JPEG3 = 35
TAG_DO_ABC_DEFINE = 82
TAG_INFO = {
# SWF version 1.0
0: ("end[]", "End", None),
1: ("show_frame[]", "Show frame", None),
2: ("def_shape[]", "Define shape", None),
3: ("free_char[]", "Free character", None),
4: ("place_obj[]", "Place object", None),
5: ("remove_obj[]", "Remove object", None),
6: ("def_bits[]", "Define bits", parseJpeg),
7: ("def_but[]", "Define button", None),
8: ("jpg_table", "JPEG tables", None),
9: ("bkgd_color[]", "Set background color", parseBackgroundColor),
10: ("def_font[]", "Define font", None),
11: ("def_text[]", "Define text", None),
12: ("action[]", "Action script", parseActionScript),
13: ("def_font_info[]", "Define font info", None),
# SWF version 2.0
14: ("def_sound[]", "Define sound", parseDefineSound),
15: ("start_sound[]", "Start sound", parseStartSound),
16: ("stop_sound[]", "Stop sound", None),
17: ("def_but_sound[]", "Define button sound", None),
18: ("sound_hdr", "Sound stream header", parseSoundHeader),
19: ("sound_blk[]", "Sound stream block", parseSoundBlock),
20: ("def_bits_lossless[]", "Define bits lossless", None),
21: ("def_bits_jpeg2[]", "Define bits JPEG 2", parseJpeg),
22: ("def_shape2[]", "Define shape 2", None),
23: ("def_but_cxform[]", "Define button CXFORM", None),
24: ("protect", "File is protected", None),
# SWF version 3.0
25: ("path_are_ps[]", "Paths are Postscript", None),
26: ("place_obj2[]", "Place object 2", None),
28: ("remove_obj2[]", "Remove object 2", None),
29: ("sync_frame[]", "Synchronize frame", None),
31: ("free_all[]", "Free all", None),
32: ("def_shape3[]", "Define shape 3", None),
33: ("def_text2[]", "Define text 2", None),
34: ("def_but2[]", "Define button2", None),
35: ("def_bits_jpeg3[]", "Define bits JPEG 3", parseJpeg),
36: ("def_bits_lossless2[]", "Define bits lossless 2", None),
39: ("def_sprite[]", "Define sprite", None),
40: ("name_character[]", "Name character", None),
41: ("product_info", "Generator product info", parseProductInfo),
42: ("generator_text[]", "Generator text", None),
43: ("frame_label[]", "Frame label", None),
45: ("sound_hdr2[]", "Sound stream header2", parseSoundHeader),
46: ("def_morph_shape[]", "Define morph shape", None),
47: ("gen_frame[]", "Generate frame", None),
48: ("def_font2[]", "Define font 2", None),
49: ("tpl_command[]", "Template command", None),
# SWF version 4.0
37: ("def_text_field[]", "Define text field", None),
38: ("def_quicktime_movie[]", "Define QuickTime movie", None),
# SWF version 5.0
50: ("def_cmd_obj[]", "Define command object", None),
51: ("flash_generator", "Flash generator", None),
52: ("gen_ext_font[]", "Gen external font", None),
56: ("export[]", "Export", parseExport),
57: ("import[]", "Import", None),
58: ("ebnable_debug", "Enable debug", None),
# SWF version 6.0
59: ("do_init_action[]", "Do init action", None),
60: ("video_str[]", "Video stream", None),
61: ("video_frame[]", "Video frame", parseVideoFrame),
62: ("def_font_info2[]", "Define font info 2", None),
63: ("mx4[]", "MX4", None),
64: ("enable_debug2", "Enable debugger 2", None),
# SWF version 7.0
65: ("script_limits[]", "Script limits", parseScriptLimits),
66: ("tab_index[]", "Set tab index", None),
# SWF version 8.0
69: ("file_attr[]", "File attributes", None),
70: ("place_obj3[]", "Place object 3", None),
71: ("import2[]", "Import a definition list from another movie", None),
73: ("def_font_align[]", "Define font alignment zones", None),
74: ("csm_txt_set[]", "CSM text settings", None),
75: ("def_font3[]", "Define font text 3", None),
77: ("metadata[]", "XML code describing the movie", None),
78: ("def_scale_grid[]", "Define scaling factors", None),
83: ("def_shape4[]", "Define shape 4", None),
84: ("def_morph2[]", "Define a morphing shape 2", None),
# SWF version 9.0
72: ("do_abc[]", "SWF 9 ActionScript container; actions only", parseABC),
76: ("symbol_class[]", "Instantiate objects from a set of classes", parseSymbolClass),
82: ("do_abc_define[]", "SWF 9 ActionScript container; identifier, name, actions", parseABC),
86: ("def_scene_frame[]", "Define raw data for scenes and frames", None),
87: ("def_binary_data[]", "Defines a buffer of any size with any binary user data", parseBinaryData),
88: ("def_font_name[]", "Define the legal font name and copyright", None),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
size = self["length"].value
if self[0].name == "length_ext":
self._size = (6+size) * 8
else:
self._size = (2+size) * 8
code = self["code"].value
if code in self.TAG_INFO:
self._name, self._description, self.parser = self.TAG_INFO[code]
else:
self.parser = None
def createFields(self):
if self.stream.readBits(self.absolute_address, 6, self.endian) == 63:
yield Bits(self, "length_ext", 6)
yield Bits(self, "code", 10)
yield filesizeHandler(UInt32(self, "length"))
else:
yield filesizeHandler(Bits(self, "length", 6))
yield Bits(self, "code", 10)
size = self["length"].value
if 0 < size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "data", size)
def createDescription(self):
return "Tag: %s (%s)" % (self["code"].display, self["length"].display)
class SwfFile(Parser):
VALID_VERSIONS = set(xrange(1, 10+1))
PARSER_TAGS = {
"id": "swf",
"category": "container",
"file_ext": ["swf"],
"mime": (u"application/x-shockwave-flash",),
"min_size": 64,
"description": u"Macromedia Flash data"
}
PARSER_TAGS["magic"] = []
for version in VALID_VERSIONS:
PARSER_TAGS["magic"].append(("FWS%c" % version, 0))
PARSER_TAGS["magic"].append(("CWS%c" % version, 0))
endian = LITTLE_ENDIAN
SWF_SCALE_FACTOR = 1.0 / 20
def validate(self):
if self.stream.readBytes(0, 3) not in ("FWS", "CWS"):
return "Wrong file signature"
if self["version"].value not in self.VALID_VERSIONS:
return "Unknown version"
if MAX_FILE_SIZE < self["filesize"].value:
return "File too big (%u)" % self["filesize"].value
if self["signature"].value == "FWS":
if self["rect/padding"].value != 0:
return "Unknown rectangle padding value"
return True
def createFields(self):
yield String(self, "signature", 3, "SWF format signature", charset="ASCII")
yield UInt8(self, "version")
yield filesizeHandler(UInt32(self, "filesize"))
if self["signature"].value != "CWS":
yield RECT(self, "rect")
yield FixedFloat16(self, "frame_rate")
yield UInt16(self, "frame_count")
while not self.eof:
yield Tag(self, "tag[]")
else:
size = (self.size - self.current_size) // 8
if has_deflate:
data = Deflate(Bytes(self, "compressed_data", size), False)
def createInputStream(cis, source=None, **args):
stream = cis(source=source)
header = StringInputStream("FWS" + self.stream.readBytes(3*8, 5))
args.setdefault("tags",[]).append(("class", SwfFile))
return ConcatStream((header, stream), source=stream.source, **args)
data.setSubIStream(createInputStream)
yield data
else:
yield Bytes(self, "compressed_data", size)
def createDescription(self):
desc = ["version %u" % self["version"].value]
if self["signature"].value == "CWS":
desc.append("compressed")
return u"Macromedia Flash data: %s" % (", ".join(desc))
def createContentSize(self):
if self["signature"].value == "FWS":
return self["filesize"].value * 8
else:
# TODO: Size of compressed Flash?
return None
| gpl-2.0 | 4,026,999,455,771,335,700 | 37.053118 | 109 | 0.588032 | false |
chen0031/Dato-Core | src/unity/python/doc/scripts/doxypy-0.4.2.py | 15 | 14005 | #!/usr/bin/env python
__applicationName__ = "doxypy"
__blurb__ = """
doxypy is an input filter for Doxygen. It preprocesses python
files so that docstrings of classes and functions are reformatted
into Doxygen-conform documentation blocks.
"""
__doc__ = __blurb__ + \
"""
In order to make Doxygen preprocess files through doxypy, simply
add the following lines to your Doxyfile:
FILTER_SOURCE_FILES = YES
INPUT_FILTER = "python /path/to/doxypy.py"
"""
__version__ = "0.4.2"
__date__ = "14th October 2009"
__website__ = "http://code.foosel.org/doxypy"
__author__ = (
"Philippe 'demod' Neumann (doxypy at demod dot org)",
"Gina 'foosel' Haeussge (gina at foosel dot net)"
)
__licenseName__ = "GPL v2"
__license__ = """This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import re
from optparse import OptionParser, OptionGroup
class FSM(object):
"""Implements a finite state machine.
Transitions are given as 4-tuples, consisting of an origin state, a target
state, a condition for the transition (given as a reference to a function
which gets called with a given piece of input) and a pointer to a function
to be called upon the execution of the given transition.
"""
"""
@var transitions holds the transitions
@var current_state holds the current state
@var current_input holds the current input
@var current_transition hold the currently active transition
"""
def __init__(self, start_state=None, transitions=[]):
self.transitions = transitions
self.current_state = start_state
self.current_input = None
self.current_transition = None
def setStartState(self, state):
self.current_state = state
def addTransition(self, from_state, to_state, condition, callback):
self.transitions.append([from_state, to_state, condition, callback])
def makeTransition(self, input):
"""Makes a transition based on the given input.
@param input input to parse by the FSM
"""
for transition in self.transitions:
[from_state, to_state, condition, callback] = transition
if from_state == self.current_state:
match = condition(input)
if match:
self.current_state = to_state
self.current_input = input
self.current_transition = transition
if options.debug:
print >>sys.stderr, "# FSM: executing (%s -> %s) for line '%s'" % (from_state, to_state, input)
callback(match)
return
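# Minimal FSM sketch (states, condition and callback are illustrative, not the
# ones doxypy registers below; assumes the module-level `options` from main() has
# been parsed so the debug check inside makeTransition works):
#
#     fsm = FSM("IDLE")
#     fsm.addTransition("IDLE", "BUSY", lambda line: line.strip(), lambda match: None)
#     fsm.makeTransition("some input") # moves the machine from IDLE to BUSY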
class Doxypy(object):
def __init__(self):
string_prefixes = "[uU]?[rR]?"
self.start_single_comment_re = re.compile("^\s*%s(''')" % string_prefixes)
self.end_single_comment_re = re.compile("(''')\s*$")
self.start_double_comment_re = re.compile("^\s*%s(\"\"\")" % string_prefixes)
self.end_double_comment_re = re.compile("(\"\"\")\s*$")
self.single_comment_re = re.compile("^\s*%s(''').*(''')\s*$" % string_prefixes)
self.double_comment_re = re.compile("^\s*%s(\"\"\").*(\"\"\")\s*$" % string_prefixes)
self.defclass_re = re.compile("^(\s*)(def .+:|class .+:)")
self.empty_re = re.compile("^\s*$")
self.hashline_re = re.compile("^\s*#.*$")
self.importline_re = re.compile("^\s*(import |from .+ import)")
self.multiline_defclass_start_re = re.compile("^(\s*)(def|class)(\s.*)?$")
self.multiline_defclass_end_re = re.compile(":\s*$")
## Transition list format
# ["FROM", "TO", condition, action]
transitions = [
### FILEHEAD
# single line comments
["FILEHEAD", "FILEHEAD", self.single_comment_re.search, self.appendCommentLine],
["FILEHEAD", "FILEHEAD", self.double_comment_re.search, self.appendCommentLine],
# multiline comments
["FILEHEAD", "FILEHEAD_COMMENT_SINGLE", self.start_single_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_SINGLE", "FILEHEAD", self.end_single_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_SINGLE", "FILEHEAD_COMMENT_SINGLE", self.catchall, self.appendCommentLine],
["FILEHEAD", "FILEHEAD_COMMENT_DOUBLE", self.start_double_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD", self.end_double_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD_COMMENT_DOUBLE", self.catchall, self.appendCommentLine],
# other lines
["FILEHEAD", "FILEHEAD", self.empty_re.search, self.appendFileheadLine],
["FILEHEAD", "FILEHEAD", self.hashline_re.search, self.appendFileheadLine],
["FILEHEAD", "FILEHEAD", self.importline_re.search, self.appendFileheadLine],
["FILEHEAD", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
["FILEHEAD", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.resetCommentSearch],
["FILEHEAD", "DEFCLASS_BODY", self.catchall, self.appendFileheadLine],
### DEFCLASS
# single line comments
["DEFCLASS", "DEFCLASS_BODY", self.single_comment_re.search, self.appendCommentLine],
["DEFCLASS", "DEFCLASS_BODY", self.double_comment_re.search, self.appendCommentLine],
# multiline comments
["DEFCLASS", "COMMENT_SINGLE", self.start_single_comment_re.search, self.appendCommentLine],
["COMMENT_SINGLE", "DEFCLASS_BODY", self.end_single_comment_re.search, self.appendCommentLine],
["COMMENT_SINGLE", "COMMENT_SINGLE", self.catchall, self.appendCommentLine],
["DEFCLASS", "COMMENT_DOUBLE", self.start_double_comment_re.search, self.appendCommentLine],
["COMMENT_DOUBLE", "DEFCLASS_BODY", self.end_double_comment_re.search, self.appendCommentLine],
["COMMENT_DOUBLE", "COMMENT_DOUBLE", self.catchall, self.appendCommentLine],
# other lines
["DEFCLASS", "DEFCLASS", self.empty_re.search, self.appendDefclassLine],
["DEFCLASS", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
["DEFCLASS", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.resetCommentSearch],
["DEFCLASS", "DEFCLASS_BODY", self.catchall, self.stopCommentSearch],
### DEFCLASS_BODY
["DEFCLASS_BODY", "DEFCLASS", self.defclass_re.search, self.startCommentSearch],
["DEFCLASS_BODY", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.startCommentSearch],
["DEFCLASS_BODY", "DEFCLASS_BODY", self.catchall, self.appendNormalLine],
### DEFCLASS_MULTI
["DEFCLASS_MULTI", "DEFCLASS", self.multiline_defclass_end_re.search, self.appendDefclassLine],
["DEFCLASS_MULTI", "DEFCLASS_MULTI", self.catchall, self.appendDefclassLine],
]
self.fsm = FSM("FILEHEAD", transitions)
self.outstream = sys.stdout
self.output = []
self.comment = []
self.filehead = []
self.defclass = []
self.indent = ""
def __closeComment(self):
"""Appends any open comment block and triggering block to the output."""
if options.autobrief:
if len(self.comment) == 1 \
or (len(self.comment) > 2 and self.comment[1].strip() == ''):
self.comment[0] = self.__docstringSummaryToBrief(self.comment[0])
if self.comment:
block = self.makeCommentBlock()
self.output.extend(block)
if self.defclass:
self.output.extend(self.defclass)
def __docstringSummaryToBrief(self, line):
"""Adds \\brief to the docstrings summary line.
A \\brief is prepended, provided no other doxygen command is at the
start of the line.
"""
stripped = line.strip()
if stripped and not stripped[0] in ('@', '\\'):
return "\\brief " + line
else:
return line
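    # Illustrative example: __docstringSummaryToBrief("Runs the parser.")
    # returns "\brief Runs the parser.", while a summary line that already
    # starts with '@' or '\' is returned unchanged.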
def __flushBuffer(self):
"""Flushes the current outputbuffer to the outstream."""
if self.output:
try:
if options.debug:
print >>sys.stderr, "# OUTPUT: ", self.output
print >>self.outstream, "\n".join(self.output)
self.outstream.flush()
except IOError:
# Fix for FS#33. Catches "broken pipe" when doxygen closes
# stdout prematurely upon usage of INPUT_FILTER, INLINE_SOURCES
# and FILTER_SOURCE_FILES.
pass
self.output = []
def catchall(self, input):
"""The catchall-condition, always returns true."""
return True
def resetCommentSearch(self, match):
"""Restarts a new comment search for a different triggering line.
Closes the current commentblock and starts a new comment search.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: resetCommentSearch"
self.__closeComment()
self.startCommentSearch(match)
def startCommentSearch(self, match):
"""Starts a new comment search.
Saves the triggering line, resets the current comment and saves
the current indentation.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: startCommentSearch"
self.defclass = [self.fsm.current_input]
self.comment = []
self.indent = match.group(1)
def stopCommentSearch(self, match):
"""Stops a comment search.
Closes the current commentblock, resets the triggering line and
appends the current line to the output.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: stopCommentSearch"
self.__closeComment()
self.defclass = []
self.output.append(self.fsm.current_input)
def appendFileheadLine(self, match):
"""Appends a line in the FILEHEAD state.
Closes the open comment block, resets it and appends the current line.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendFileheadLine"
self.__closeComment()
self.comment = []
self.output.append(self.fsm.current_input)
def appendCommentLine(self, match):
"""Appends a comment line.
The comment delimiter is removed from multiline start and ends as
well as singleline comments.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendCommentLine"
(from_state, to_state, condition, callback) = self.fsm.current_transition
# single line comment
if (from_state == "DEFCLASS" and to_state == "DEFCLASS_BODY") \
or (from_state == "FILEHEAD" and to_state == "FILEHEAD"):
# remove comment delimiter from begin and end of the line
activeCommentDelim = match.group(1)
line = self.fsm.current_input
self.comment.append(line[line.find(activeCommentDelim)+len(activeCommentDelim):line.rfind(activeCommentDelim)])
if (to_state == "DEFCLASS_BODY"):
self.__closeComment()
self.defclass = []
# multiline start
elif from_state == "DEFCLASS" or from_state == "FILEHEAD":
# remove comment delimiter from begin of the line
activeCommentDelim = match.group(1)
line = self.fsm.current_input
self.comment.append(line[line.find(activeCommentDelim)+len(activeCommentDelim):])
# multiline end
elif to_state == "DEFCLASS_BODY" or to_state == "FILEHEAD":
# remove comment delimiter from end of the line
activeCommentDelim = match.group(1)
line = self.fsm.current_input
self.comment.append(line[0:line.rfind(activeCommentDelim)])
if (to_state == "DEFCLASS_BODY"):
self.__closeComment()
self.defclass = []
# in multiline comment
else:
# just append the comment line
self.comment.append(self.fsm.current_input)
def appendNormalLine(self, match):
"""Appends a line to the output."""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendNormalLine"
self.output.append(self.fsm.current_input)
def appendDefclassLine(self, match):
"""Appends a line to the triggering block."""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendDefclassLine"
self.defclass.append(self.fsm.current_input)
def makeCommentBlock(self):
"""Indents the current comment block with respect to the current
indentation level.
@returns a list of indented comment lines
"""
doxyStart = "##"
commentLines = self.comment
commentLines = map(lambda x: "%s# %s" % (self.indent, x), commentLines)
l = [self.indent + doxyStart]
l.extend(commentLines)
return l
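    # Illustrative example: with indent="    " and comment=["Runs the parser."],
    # makeCommentBlock() returns ["    ##", "    # Runs the parser."].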
def parse(self, input):
"""Parses a python file given as input string and returns the doxygen-
compatible representation.
@param input the python code to parse
@returns the modified python code
"""
lines = input.split("\n")
for line in lines:
self.fsm.makeTransition(line)
if self.fsm.current_state == "DEFCLASS":
self.__closeComment()
return "\n".join(self.output)
def parseFile(self, filename):
"""Parses a python file given as input string and returns the doxygen-
compatible representation.
@param input the python code to parse
@returns the modified python code
"""
f = open(filename, 'r')
for line in f:
self.parseLine(line.rstrip('\r\n'))
if self.fsm.current_state == "DEFCLASS":
self.__closeComment()
self.__flushBuffer()
f.close()
def parseLine(self, line):
"""Parse one line of python and flush the resulting output to the
outstream.
@param line the python code line to parse
"""
self.fsm.makeTransition(line)
self.__flushBuffer()
def optParse():
"""Parses commandline options."""
parser = OptionParser(prog=__applicationName__, version="%prog " + __version__)
parser.set_usage("%prog [options] filename")
parser.add_option("--autobrief",
action="store_true", dest="autobrief",
help="use the docstring summary line as \\brief description"
)
parser.add_option("--debug",
action="store_true", dest="debug",
help="enable debug output on stderr"
)
## parse options
global options
(options, filename) = parser.parse_args()
if not filename:
print >>sys.stderr, "No filename given."
sys.exit(-1)
return filename[0]
def main():
"""Starts the parser on the file given by the filename as the first
argument on the commandline.
"""
filename = optParse()
fsm = Doxypy()
fsm.parseFile(filename)
if __name__ == "__main__":
main()
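# Example invocation (sketch; the script filename is assumed): doxygen can
# use this as an INPUT_FILTER, e.g.
#   python doxypy.py --autobrief somemodule.py
# and the filtered, doxygen-ready source is written to stdout.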
| agpl-3.0 | -4,336,987,864,930,660,400 | 32.828502 | 114 | 0.696251 | false |
rplevka/selenium | py/test/selenium/webdriver/common/rendered_webelement_tests.py | 63 | 3233 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.webdriver.common.by import By
class RenderedWebElementTests(unittest.TestCase):
@pytest.mark.ignore_chrome
def testShouldPickUpStyleOfAnElement(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="green-parent")
backgroundColour = element.value_of_css_property("background-color")
self.assertEqual("rgba(0, 128, 0, 1)", backgroundColour)
element = self.driver.find_element(by=By.ID, value="red-item")
backgroundColour = element.value_of_css_property("background-color")
self.assertEqual("rgba(255, 0, 0, 1)", backgroundColour)
@pytest.mark.ignore_chrome
def testShouldAllowInheritedStylesToBeUsed(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs has an issue with getting the right value for background-color")
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="green-item")
backgroundColour = element.value_of_css_property("background-color")
self.assertEqual("transparent", backgroundColour)
def testShouldCorrectlyIdentifyThatAnElementHasWidth(self):
self._loadPage("xhtmlTest")
shrinko = self.driver.find_element(by=By.ID, value="linkId")
size = shrinko.size
self.assertTrue(size["width"] > 0, "Width expected to be greater than 0")
self.assertTrue(size["height"] > 0, "Height expected to be greater than 0")
def testShouldBeAbleToDetermineTheRectOfAnElement(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support rect command")
self._loadPage("xhtmlTest")
element = self.driver.find_element(By.ID, "username")
rect = element.rect
self.assertTrue(rect["x"] > 0, "Element should not be in the top left")
self.assertTrue(rect["y"] > 0, "Element should not be in the top left")
self.assertTrue(rect["width"] > 0, "Width expected to be greater than 0")
self.assertTrue(rect["height"] > 0, "Height expected to be greater than 0")
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| apache-2.0 | -8,606,255,659,226,520,000 | 38.91358 | 100 | 0.700588 | false |
slohse/ansible | lib/ansible/modules/system/alternatives.py | 29 | 5286 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Gabe Mulley <[email protected]>
# (c) 2015, David Wittman <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
author:
- "David Wittman (@DavidWittman)"
- "Gabe Mulley (@mulby)"
options:
name:
description:
- The generic name of the link.
required: true
path:
description:
- The path to the real executable that the link should point to.
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is always required on RHEL-based distributions. On Debian-based distributions this option is
required when the alternative I(name) is unknown to the system.
required: false
priority:
description:
- The priority of the alternative
required: false
default: 50
version_added: "2.2"
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
alternatives:
name: hadoop-conf
link: /etc/hadoop/conf
path: /etc/hadoop/conf.ansible
- name: make java 32 bit an alternative with low priority
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
priority: -10
'''
import os
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
path=dict(required=True, type='path'),
link=dict(required=False, type='path'),
priority=dict(required=False, type='int',
default=50),
),
supports_check_mode=True,
)
params = module.params
name = params['name']
path = params['path']
link = params['link']
priority = params['priority']
UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
current_path = None
all_alternatives = []
# Run `update-alternatives --display <name>` to find existing alternatives
(rc, display_output, _) = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
)
if rc == 0:
# Alternatives already exist for this link group
# Parse the output to determine the current path of the symlink and
# available alternatives
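        # Illustrative (not verbatim) --display output the regexes below parse:
        #   java - auto mode
        #     link currently points to /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
        #   /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - priority 1071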
current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
match = current_path_regex.search(display_output)
if match:
current_path = match.group(1)
all_alternatives = alternative_regex.findall(display_output)
if not link:
# Read the current symlink target from `update-alternatives --query`
# in case we need to install the new alternative before setting it.
#
        # This is only compatible with Debian-based systems, as other
        # implementations of update-alternatives don't provide --query.
rc, query_output, _ = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
)
if rc == 0:
for line in query_output.splitlines():
if line.startswith('Link:'):
link = line.split()[1]
break
if current_path != path:
if module.check_mode:
module.exit_json(changed=True, current_path=current_path)
try:
# install the requested path if necessary
if path not in all_alternatives:
if not os.path.exists(path):
module.fail_json(msg="Specified path %s does not exist" % path)
if not link:
module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
module.run_command(
[UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
check_rc=True
)
# select the requested path
module.run_command(
[UPDATE_ALTERNATIVES, '--set', name, path],
check_rc=True
)
module.exit_json(changed=True)
except subprocess.CalledProcessError as cpe:
module.fail_json(msg=str(dir(cpe)))
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,584,142,614,335,149,000 | 31.036364 | 125 | 0.602913 | false |
mapr/hue | desktop/core/ext-py/Django-1.6.10/tests/model_fields/test_imagefield.py | 54 | 16032 | from __future__ import absolute_import
import os
import shutil
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from django.core.files.images import ImageFile
from django.test import TestCase
from django.utils._os import upath
from django.utils.unittest import skipIf
try:
from .models import Image
except ImproperlyConfigured:
Image = None
if Image:
from .models import (Person, PersonWithHeight, PersonWithHeightAndWidth,
PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile)
from .models import temp_storage_dir
else:
# Pillow not available, create dummy classes (tests will be skipped anyway)
class Person():
pass
PersonWithHeight = PersonWithHeightAndWidth = PersonDimensionsFirst = Person
PersonTwoImages = Person
class ImageFieldTestMixin(object):
"""
Mixin class to provide common functionality to ImageField test classes.
"""
# Person model to use for tests.
PersonModel = PersonWithHeightAndWidth
# File class to use for file instances.
File = ImageFile
def setUp(self):
"""
Creates a pristine temp directory (or deletes and recreates if it
already exists) that the model uses as its storage directory.
Sets up two ImageFile instances for use in tests.
"""
if os.path.exists(temp_storage_dir):
shutil.rmtree(temp_storage_dir)
os.mkdir(temp_storage_dir)
file_path1 = os.path.join(os.path.dirname(upath(__file__)), "4x8.png")
self.file1 = self.File(open(file_path1, 'rb'))
file_path2 = os.path.join(os.path.dirname(upath(__file__)), "8x4.png")
self.file2 = self.File(open(file_path2, 'rb'))
def tearDown(self):
"""
Removes temp directory and all its contents.
"""
shutil.rmtree(temp_storage_dir)
def check_dimensions(self, instance, width, height,
field_name='mugshot'):
"""
Asserts that the given width and height values match both the
field's height and width attributes and the height and width fields
(if defined) the image field is caching to.
Note, this method will check for dimension fields named by adding
"_width" or "_height" to the name of the ImageField. So, the
models used in these tests must have their fields named
accordingly.
By default, we check the field named "mugshot", but this can be
specified by passing the field_name parameter.
"""
field = getattr(instance, field_name)
# Check height/width attributes of field.
if width is None and height is None:
self.assertRaises(ValueError, getattr, field, 'width')
self.assertRaises(ValueError, getattr, field, 'height')
else:
self.assertEqual(field.width, width)
self.assertEqual(field.height, height)
# Check height/width fields of model, if defined.
width_field_name = field_name + '_width'
if hasattr(instance, width_field_name):
self.assertEqual(getattr(instance, width_field_name), width)
height_field_name = field_name + '_height'
if hasattr(instance, height_field_name):
self.assertEqual(getattr(instance, height_field_name), height)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests for ImageField that don't need to be run with each of the
different test model classes.
"""
def test_equal_notequal_hash(self):
"""
Bug #9786: Ensure '==' and '!=' work correctly.
Bug #9508: make sure hash() works as expected (equal items must
hash to the same value).
"""
# Create two Persons with different mugshots.
p1 = self.PersonModel(name="Joe")
p1.mugshot.save("mug", self.file1)
p2 = self.PersonModel(name="Bob")
p2.mugshot.save("mug", self.file2)
self.assertEqual(p1.mugshot == p2.mugshot, False)
self.assertEqual(p1.mugshot != p2.mugshot, True)
# Test again with an instance fetched from the db.
p1_db = self.PersonModel.objects.get(name="Joe")
self.assertEqual(p1_db.mugshot == p2.mugshot, False)
self.assertEqual(p1_db.mugshot != p2.mugshot, True)
# Instance from db should match the local instance.
self.assertEqual(p1_db.mugshot == p1.mugshot, True)
self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
self.assertEqual(p1_db.mugshot != p1.mugshot, False)
def test_instantiate_missing(self):
"""
        If the underlying file is unavailable, still instantiate the
        object without error.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
p = self.PersonModel.objects.get(name="Joan")
path = p.mugshot.path
shutil.move(path, path + '.moved')
p2 = self.PersonModel.objects.get(name="Joan")
def test_delete_when_missing(self):
"""
Bug #8175: correctly delete an object where the file no longer
exists on the file system.
"""
p = self.PersonModel(name="Fred")
p.mugshot.save("shot", self.file1)
os.remove(p.mugshot.path)
p.delete()
def test_size_method(self):
"""
Bug #8534: FileField.size should not leave the file open.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
# Get a "clean" model instance
p = self.PersonModel.objects.get(name="Joan")
# It won't have an opened file.
self.assertEqual(p.mugshot.closed, True)
# After asking for the size, the file should still be closed.
_ = p.mugshot.size
self.assertEqual(p.mugshot.closed, True)
def test_pickle(self):
"""
Tests that ImageField can be pickled, unpickled, and that the
image of the unpickled version is the same as the original.
"""
import pickle
p = Person(name="Joe")
p.mugshot.save("mug", self.file1)
dump = pickle.dumps(p)
p2 = Person(name="Bob")
p2.mugshot = self.file1
loaded_p = pickle.loads(dump)
self.assertEqual(p.mugshot, loaded_p.mugshot)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
"""
Tests behavior of an ImageField and its dimensions fields.
"""
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
p.save()
self.check_dimensions(p, 4, 8)
def test_image_after_constructor(self):
"""
Tests behavior when image is not passed in constructor.
"""
p = self.PersonModel(name='Joe')
# TestImageField value will default to being an instance of its
# attr_class, a TestImageFieldFile, with name == None, which will
# cause it to evaluate as False.
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
# Test setting a fresh created model instance.
p = self.PersonModel(name='Joe')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8)
def test_create(self):
"""
Tests assigning an image in Manager.create().
"""
p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
def test_default_value(self):
"""
Tests that the default value for an ImageField is an instance of
the field's attr_class (TestImageFieldFile in this case) with no
name (name set to None).
"""
p = self.PersonModel()
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
def test_assignment_to_None(self):
"""
Tests that assigning ImageField to None clears dimensions.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
# If image assigned to None, dimension fields should be cleared.
p.mugshot = None
self.check_dimensions(p, None, None)
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
def test_field_save_and_delete_methods(self):
"""
Tests assignment using the field's save method and deletion using
the field's delete method.
"""
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# A new file should update dimensions.
p.mugshot.save("mug", self.file2)
self.check_dimensions(p, 8, 4)
# Field and dimensions should be cleared after a delete.
p.mugshot.delete(save=False)
self.assertEqual(p.mugshot, None)
self.check_dimensions(p, None, None)
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set if file is saved.
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.check_dimensions(p, 4, 8)
# After checking dimensions on the image field, the file will have
# opened.
self.assertEqual(p.mugshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
self.check_dimensions(p, 4, 8)
self.assertEqual(p.mugshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with no dimension fields.
"""
PersonModel = Person
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with one dimensions field.
"""
PersonModel = PersonWithHeight
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField where the dimensions fields are
defined before the ImageField.
"""
PersonModel = PersonDimensionsFirst
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField when assigning it a File instance
rather than an ImageFile instance.
"""
PersonModel = PersonDimensionsFirst
File = File
@skipIf(Image is None, "PIL is required to test ImageField")
class TwoImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests a model with two ImageFields.
"""
PersonModel = PersonTwoImages
def test_constructor(self):
p = self.PersonModel(mugshot=self.file1, headshot=self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.save()
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
def test_create(self):
p = self.PersonModel.objects.create(mugshot=self.file1,
headshot=self.file2)
self.check_dimensions(p, 4, 8)
self.check_dimensions(p, 8, 4, 'headshot')
def test_assignment(self):
p = self.PersonModel()
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot = self.file2
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Clear the ImageFields one at a time.
p.mugshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.headshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_field_save_and_delete_methods(self):
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# We can use save=True when deleting the image field with null=True
# dimension fields and the other field has an image.
p.headshot.delete(save=True)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot.delete(save=False)
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set for the saved file.
p.mugshot.save("mug", self.file1)
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# After checking dimensions on the image fields, the files will
# have been opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
p.headshot.was_opened = False
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
p.headshot = self.file1
self.check_dimensions(p, 8, 4, 'mugshot')
self.check_dimensions(p, 4, 8, 'headshot')
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
| apache-2.0 | 5,925,871,952,473,017,000 | 35.60274 | 80 | 0.637226 | false |
rfinn/LCS | paper1code/LCSreadmaster.v2.py | 1 | 46244 | #!/usr/bin/env python
import pyfits
from LCScommon import *
from pylab import *
import os
import mystuff as my
#these correspond to the area w/more uniform coverage
MKW824um=array([220.16377,3.4883817,1.3137727,2.5,12.7456],'f')
MKW1124um=array([202.36305,11.746882,1.2454248,2.9,206.4],'f')
NGC24um=array([244.30994,34.933704,1.2865442,2.5,321.317],'f')
def drawbox(data,style):#feed in center x,y,dx,dy,rotation E of N
    #corner offsets relative to the box center, going around CCW (the
    #rotation below works in center-relative coordinates, so absolute
    #corner positions are not needed here)
    xl=array([-0.5*data[2],+0.5*data[2],+0.5*data[2],-0.5*data[2],-0.5*data[2]],'d')
    yl=array([-0.5*data[3],-0.5*data[3],+0.5*data[3],+0.5*data[3],-0.5*data[3] ],'d')
ang=data[4]*pi/180.*-1.#convert rotation to radians
#rotate coordinates
xp=cos(ang)*xl-sin(ang)*yl
yp=sin(ang)*xl+cos(ang)*yl
#put back on absolute scale
xp=data[0]+xp
yp=data[1]+yp
#draw rotated box
plot(xp,yp,style)
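#example usage (sketch): overplot the MKW11 24um coverage region in red
#   drawbox(MKW1124um,'r-')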
def calcC90(x,y,yerr,fluxencl):#radius ("), intensity, error, enclosed flux
#find average using last 3 pts, which should approximate sky
sky=mean(y[len(y)-3:len(y)])
#subtract sky from y
sy=y-sky
#multiply y by r**2 to account for increasing area
toty=y*(x**2)
    #sum over all but the outermost 3 (sky) points to get the total flux
totygood=toty[0:len(y)-3]
rgood=x[0:len(y)-3]
totflux=sum(totygood)
    #sum from the outermost good point inward until 10% of the total flux is enclosed
sum10=0
tenpercent=.1*totflux
thirty=.3*totflux
for i in range(len(totygood)):
index=len(totygood)-1-i
sum10 += totygood[index]
#print sky,sum10,tenpercent,totflux
if sum10 > tenpercent:
r90=rgood[index]
break
#calculate r30
sum30=0
for i in range(len(totygood)):
sum30 += totygood[i]
if sum30 > thirty:
r30=rgood[i]
break
##calculate r90 using enclosed flux array
#find max of array
maxEncFlux=max(fluxencl)
indexMax=where((fluxencl == maxEncFlux))
#break index out of array and into a plain integer
indexMax=indexMax[0]
#use index of max of array and move inward until value is .9 max
transitionFlux=0.9*maxEncFlux
for i in range(indexMax):
index=indexMax-i
if fluxencl[index] < transitionFlux:
r90FromEncFlux=x[index]
break
#calculate r30FromEncFlux
transitionFlux=0.3*maxEncFlux
for i in range(indexMax):
index=i
if fluxencl[index] > transitionFlux:
r30FromEncFlux=x[index]
break
#return radius at C90 (the radius that encloses 90% of the light) and sky
try:
return r90,sky,r90FromEncFlux,maxEncFlux,r30,r30FromEncFlux
except UnboundLocalError:
print "Warning: Could not find R90"
try:
return 0,sky,r90FromEncFlux,maxEncFlux,r30,r30FromEncFlux
except UnboundLocalError:
print "Warning: Could not find R30 From Enc Flux"
return 0,sky,r90FromEncFlux,maxEncFlux,r30,0
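#minimal sketch of calcC90 on synthetic (hypothetical) profile data:
#   r=arange(1.,61.,3.)            #radii in arcsec
#   I=100.*exp(-r/8.)+0.5          #exponential disk plus a flat sky level
#   Ierr=sqrt(abs(I))
#   fenc=cumsum(I*r**2)            #crude enclosed-flux curve
#   r90,sky,r90enc,maxenc,r30,r30enc=calcC90(r,I,Ierr,fenc)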
class cluster:
def __init__(self,clustername):
#Get current path so program can tell if this is being run on Becky or Rose's computer
self.prefix=clustername
self.cra=clusterRA[self.prefix]
self.cdec=clusterDec[self.prefix]
self.cz=clusterz[self.prefix]
self.biweightvel=clustercbi[self.prefix]
self.biweightscale=clustersbi[self.prefix]
self.r200=2.02*(self.biweightscale)/1000./sqrt(OmegaL+OmegaM*(1.+self.cz)**3)*H0/70. # in Mpc
self.r200deg=self.r200*1000./my.DA(self.cz,h)/3600.
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
infile='/Users/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
infile='/home/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
homedir='/home/rfinn/'
#infile='/home/rfinn/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
tb=pyfits.open(infile)
tbdata=tb[1].data
tb.close()
self.agcflag=tbdata.field('AGCflag')
self.HIflag=tbdata.field('HIFLAG')
self.sdssflag=tbdata.field('SDSSflag')
self.sdssphotflag=tbdata.field('SDSSphotflag')
self.mpaflag=tbdata.field('MPAFLAG')
self.apexflag=tbdata.field('APEXFLAG')
self.sexsdssflag=tbdata.field('SEXSDSSflag')
self.sex24flag=tbdata.field('SEX24FLAG')
self.agcvoptflag=tbdata.field('AGCVOPTFLAG')
self.agcnumber=tbdata.field('AGCNUMBER')
self.raagc=tbdata.field('AGCRA')
self.decagc=tbdata.field('AGCDEC')
self.a100=tbdata.field('A100')
self.b100=tbdata.field('B100')
self.mag10=tbdata.field('MAG10')
self.posang=tbdata.field('POSANG')
self.bsteintype=tbdata.field('BSTEINTYPE')
self.vopt=tbdata.field('VOPT')
self.verr=tbdata.field('VERR')
self.vsource=tbdata.field('VSOURCE')
self.flux100=tbdata.field('FLUX100')
self.rms100=tbdata.field('RMS100')
self.v21=tbdata.field('V21')
self.width=tbdata.field('WIDTH')
self.widtherr=tbdata.field('WIDTHERR')
#sdss info
self.sdssra=tbdata.field('SDSSRA')
self.sdssdec=tbdata.field('SDSSDEC')
self.sdssphotra=tbdata.field('SDSSphotRA')
self.sdssphotdec=tbdata.field('SDSSphotDEC')
self.sdssu=tbdata.field('SDSSU')
self.sdssg=tbdata.field('SDSSG')
self.sdssr=tbdata.field('SDSSR')
self.sdssi=tbdata.field('SDSSI')
self.sdssz=tbdata.field('SDSSZ')
self.sdssspecz=tbdata.field('SDSSSPECZ')
self.sdssvopt=tbdata.field('SDSSVOPT')
self.sdsshaew=tbdata.field('SDSSHAEW')
self.sdsshaewerr=tbdata.field('SDSSHAEWERR')
self.sdssplate=tbdata.field('SDSSPLATE')
self.sdssfiberid=tbdata.field('SDSSFIBERID')
self.sdsstile=tbdata.field('SDSSTILE')
self.mpahalpha=tbdata.field('MPAHALPHA')
self.mpahbeta=tbdata.field('MPAHBETA')
self.mpao3=tbdata.field('MPAOIII')
self.mpan2=tbdata.field('MPANII')
#sextractor info
self.numberser=tbdata.field('NUMBERSER')
self.ximageser=tbdata.field('XIMAGESER')
self.yimageser=tbdata.field('YIMAGESER')
self.xminimageser=tbdata.field('XMINIMAGESER')
self.xmaximageser=tbdata.field('XMAXIMAGESER')
self.yminimageser=tbdata.field('YMINIMAGESER')
self.raser=tbdata.field('RASER')
self.decser=tbdata.field('DECSER')
self.fluxisoser=tbdata.field('FLUXISOSER')
self.fluxerrisoser=tbdata.field('FLUXERRISOSER')
self.magisoser=tbdata.field('MAGISOSER')
self.magerrisoser=tbdata.field('MAGERRISOSER')
self.fluxautoser=tbdata.field('FLUXAUTOSER')
self.fluxerrautoser=tbdata.field('FLUXERRAUTOSER')
self.magautoser=tbdata.field('MAGAUTOSER')
self.magerrautoser=tbdata.field('MAGERRAUTOSER')
self.fluxpetroser=tbdata.field('FLUXPETROSER')
self.fluxerrpetroser=tbdata.field('FLUXERRPETROSER')
self.magpetroser=tbdata.field('MAGPETROSER')
self.magerrpetroser=tbdata.field('MAGERRPETROSER')
self.kronradser=tbdata.field('KRONRADSER')#kron radius
self.petroradser=tbdata.field('PETRORADSER')#petrosian radius
self.fluxradser=tbdata.field('FLUXRADSER')#1/2 light radius
self.isoareaser=tbdata.field('ISOAREASER')
self.aworldser=tbdata.field('AWORLDSER')
self.bworldser=tbdata.field('BWORLDSER')
self.thetaser=tbdata.field('THETASER')
self.errthetaser=tbdata.field('ERRTHETASER')
self.thetaj2000ser=tbdata.field('THETAJ2000SER')
self.errthetaj2000ser=tbdata.field('ERRTHETAJ2000SER')
self.elongser=tbdata.field('ELONGATIONSER')
self.elliptser=tbdata.field('ELLIPTICITYSER')
self.fwhmser=tbdata.field('FWHMSER')
self.flagsser=tbdata.field('FLAGSSER')
self.classstarser=tbdata.field('CLASSSTARSER')
#SEXTRACTOR output 24 micron data
self.numberse24=tbdata.field('NUMBERSE24')
self.ximagese24=tbdata.field('XIMAGESE24')
self.yimagese24=tbdata.field('YIMAGESE24')
self.xminimagese24=tbdata.field('XMINIMAGESE24')
self.xmaximagese24=tbdata.field('XMAXIMAGESE24')
self.xminimagese24=tbdata.field('YMINIMAGESE24')
self.rase24=tbdata.field('RASE24')
self.decse24=tbdata.field('DECSE24')
self.fluxisose24=tbdata.field('FLUXISOSE24')
self.fluxerrisose24=tbdata.field('FLUXERRISOSE24')
self.magisose24=tbdata.field('MAGISOSE24')
self.magerrisose24=tbdata.field('MAGERRISOSE24')
self.fluxautose24=tbdata.field('FLUXAUTOSE24')
self.fluxerrautose24=tbdata.field('FLUXERRAUTOSE24')
self.magautose24=tbdata.field('MAGAUTOSE24')
self.magerrautose24=tbdata.field('MAGERRAUTOSE24')
self.fluxpetrose24=tbdata.field('FLUXPETROSE24')
self.fluxerrpetrose24=tbdata.field('FLUXERRPETROSE24')
self.magpetrose24=tbdata.field('MAGPETROSE24')
self.magerrpetrose24=tbdata.field('MAGERRPETROSE24')
self.kronradse24=tbdata.field('KRONRADSE24')
self.petroradse24=tbdata.field('PETRORADSE24')
self.fluxradse24=tbdata.field('FLUXRADSE24')
self.isoarease24=tbdata.field('ISOAREASE24')
self.aworldse24=tbdata.field('AWORLDSE24')
self.bworldse24=tbdata.field('BWORLDSE24')
self.thetase24=tbdata.field('THETASE24')
self.errthetase24=tbdata.field('ERRTHETASE24')
self.thetaj2000se24=tbdata.field('THETAJ2000SE24')
self.errthetaj2000se24=tbdata.field('ERRTHETAJ2000SE24')
self.elongse24=tbdata.field('ELONGATIONSE24')
self.elliptse24=tbdata.field('ELLIPTICITYSE24')
self.fwhmse24=tbdata.field('FWHMSE24')
self.flagsse24=tbdata.field('FLAGSSE24')
self.classstarse24=tbdata.field('CLASSSTARSE24')
self.f24dist=self.fluxautose24[self.sex24flag]
#apex output
self.mipsra=tbdata.field('MIPSRA')
self.mipsdec=tbdata.field('MIPSDEC')
self.mipsflux=tbdata.field('MIPSFLUX')
self.mipsfluxerr=tbdata.field('MIPSFLUXERR')
self.mipssnr=tbdata.field('MIPSSNR')
self.mipsdeblend=tbdata.field('MIPSDEBLEND')
self.mipsfluxap1=tbdata.field('MIPSFLUXAP1')
self.mipsfluxap1err=tbdata.field('MIPSFLUXAP1ERR')
self.mipsfluxap2=tbdata.field('MIPSFLUXAP2')
self.mipsfluxap2err=tbdata.field('MIPSFLUXAP2ERR')
self.mipsfluxap3=tbdata.field('MIPSFLUXAP3')
        self.mipsfluxap3err=tbdata.field('MIPSFLUXAP3ERR')
self.On24ImageFlag=tbdata.field('On24ImageFlag')
self.supervopt=tbdata.field('SUPERVOPT')
self.ra=tbdata.field('SUPERRA')
self.dec=tbdata.field('SUPERDEC')
self.stellarmass=tbdata.field('STELLARMASS')
self.sdssMu=tbdata.field('SDSSMU')
self.sdssLu=tbdata.field('SDSSLU')
self.sdssMg=tbdata.field('SDSSMG')
self.sdssLg=tbdata.field('SDSSLG')
self.sdssMr=tbdata.field('SDSSMR')
self.sdssLr=tbdata.field('SDSSLR')
self.sdssMi=tbdata.field('SDSSMI')
self.sdssLi=tbdata.field('SDSSLI')
self.sdssMz=tbdata.field('SDSSMZ')
self.sdssLz=tbdata.field('SDSSLZ')
self.membflag =tbdata.field('MEMBFLAG')
self.morphflag =tbdata.field('MORPHFLAG')
self.morph =tbdata.field('MORPH')
self.disturb =tbdata.field('DISTURB')
self.localdens =tbdata.field('LOCALDENS')
self.agn1 =tbdata.field('AGNKAUFF')
self.agn2 =tbdata.field('AGNKEWLEY')
self.agn3 =tbdata.field('AGNSTASIN')
self.logn2halpha=log10(self.mpan2/self.mpahalpha)
self.logo3hbeta=log10(self.mpao3/self.mpahbeta)
self.ellipseflag24 =tbdata.field('ELLIPSEFLAG24')
self.ellipseflagsdss =tbdata.field('ELLIPSEFLAGSDSS')
self.ellipseflag =tbdata.field('ELLIPSEFLAG')
# galaxy zoo fields
self.galzooflag =tbdata.field('GALZOOFLAG')
self.galzoonvote =tbdata.field('GALZOONVOTE')
self.galzoopel =tbdata.field('GALZOOPEL')
self.galzoopcw =tbdata.field('GALZOOPCW')
self.galzoopacw =tbdata.field('GALZOOPACW')
self.galzoopedge =tbdata.field('GALZOOPEDGE')
self.galzoopdk =tbdata.field('GALZOOPDK')
self.galzoopmg =tbdata.field('GALZOOPMG')
self.galzoopcs =tbdata.field('GALZOOPCS')
self.galzoopeldebiased =tbdata.field('GALZOOPELDEBIASED')
self.galzoopcsdebiased =tbdata.field('GALZOOPCSDEBIASED')
self.galzoospiral =tbdata.field('GALZOOSPIRAL')
self.galzooelliptical =tbdata.field('GALZOOELLIPTICAL')
self.galzoouncertain =tbdata.field('GALZOOUNCERTAIN')
#new SDSS fields that quantify radial extent of galaxy
self.sdssIsoAr =tbdata.field('SDSSISOAR')
self.sdssIsoBr =tbdata.field('SDSSISOBR')
self.sdssIsoPhir =tbdata.field('SDSSISOPHIR')
self.sdssIsoPhirErr =tbdata.field('SDSSISOPHIERRR')
self.sdssExpRadr =tbdata.field('SDSSEXPRADR')
self.sdssExpABr =tbdata.field('SDSSEXPABR')
self.sdssExpABrErr =tbdata.field('SDSSEXPABRERR')
self.sdssExpPhir =tbdata.field('SDSSEXPPHIR')
self.sdssExpPhirErr =tbdata.field('SDSSEXPPHIERRR')
        self.sdssumag=tbdata.field('SDSSDEREDU')#de-reddened magnitudes
        self.sdssgmag=tbdata.field('SDSDEREDSG')#column name as given in the master table (sic)
self.sdssrmag=tbdata.field('SDSSDEREDR')
self.sdssimag=tbdata.field('SDSSDEREDI')
self.sdsszmag=tbdata.field('SDSSDEREDZ')
#end of master table!
#self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & (self.galzoopcsdebiased > 0.6)
self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & self.galzoospiral
self.clustername=clustername
self.clusterra=clusterRA[clustername]
self.clusterdec=clusterDec[clustername]
self.dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
self.drR200=self.dr/self.r200deg
self.clustervel=clustervel[clustername]
self.clustersigma=clustersigma[clustername]
self.clustervmin=self.clustervel-3.*self.clustersigma
self.clustervmax=self.clustervel+3.*self.clustersigma
self.dist=sqrt((self.clusterra-self.ra)**2 + (self.clusterdec-self.dec)**2)
self.flagHI = (self.flux100 > 0.)
self.flagmemb = ((self.vopt > self.clustervmin) & (self.vopt < self.clustervmax)) | ((self.v21 > self.clustervmin) & (self.v21 < self.clustervmax))
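        #member if either the optical (vopt) or HI (v21) velocity falls
        #within 3 sigma of the cluster systemic velocity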
self.allvelocity=3.e5*self.sdssspecz
for i in range(len(self.allvelocity)):
if self.sdssflag[i] < 1:
if self.v21[i] > 0:
self.allvelocity[i]=self.v21[i]
else:
self.allvelocity[i]=self.vopt[i]
self.nmemb=len(self.dist[self.membflag & self.On24ImageFlag])
self.nfield=len(self.dist[self.On24ImageFlag])-self.nmemb
print self.clustername,": ","N members = ",self.nmemb," N field = ",self.nfield
print ' N spirals = ',sum(self.spiralFlag),' Nspiral members = ',sum(self.spiralFlag&self.membflag)
def plotagn(self):
figure()
clf()
plot(self.logn2halpha,self.logo3hbeta,'k.')
plot(self.logn2halpha[self.agn2],self.logo3hbeta[self.agn2],'co',markersize=12)
plot(self.logn2halpha[self.agn1],self.logo3hbeta[self.agn1],'go',markersize=8)
plot(self.logn2halpha[self.agn3],self.logo3hbeta[self.agn3],'ro',markersize=4)
#draw AGN diagnostic lines
x=arange(-3,1,.01)
y=(.61/(x-.47)+1.19)#Kewley
plot(x,y,'c')
y =(.61/(x-.05)+1.3)#Kauffman 2003?
plot(x,y,'g')
y = ((-30.787+(1.1358*x)+((.27297)*(x)**2))*tanh(5.7409*x))-31.093 #Stasinska 2006
plot(x,y,'r')
axis([-3,1.,-2,2])
def getFilesForProfileFitting(self):
outfile1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.24.dat'
outfile2=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.sdss.dat'
outfile3=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.24.dat'
outfile4=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.sdss.dat'
agcSpiral=self.agcnumber[self.spiralFlag]
out1=open(outfile1,'w')
out2=open(outfile2,'w')
out3=open(outfile3,'w')
out4=open(outfile4,'w')
cutoutpath=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'
for i in range(len(agcSpiral)):
outim=cutoutpath+self.prefix+'-'+str(agcSpiral[i])+'-cutout-sdss.fits \n'
outim24=cutoutpath+self.prefix+'-'+str(agcSpiral[i])+'-cutout-24-rot.fits \n'
out1.write(outim24)
out2.write(outim)
outfile=homedir+'research/LocalClusters/EllipseTables/'+self.prefix+'/'+self.prefix+'-'+str(agcSpiral[i])+'-cutout-sdss.dat \n'
outfile24=homedir+'research/LocalClusters/EllipseTables/'+self.prefix+'/'+self.prefix+'-'+str(agcSpiral[i])+'-cutout-24-rot.dat \n'
out3.write(outfile24)
out4.write(outfile)
out1.close()
out2.close()
out3.close()
out4.close()
def plotpositions(self):
#figure()
#clf()
#draw footprint of mips data, if applicable
#if self.clustername.find('MKW8') > -1:
# drawbox(MKW824um,'r-')
#if self.clustername.find('MKW11') > -1:
# drawbox(MKW1124um,'r-')
#if self.clustername.find('NGC6107') > -1:
# drawbox(NGC24um,'r-')
#scatter(ra[flag],dec[flag],s=(20-agcmag10[flag]/10)*20+20,color='.8')
#plot(ra[flag],dec[flag],'k.')
plot(self.ra[self.sdssflag],self.dec[self.sdssflag],'k.', alpha=0.5,markersize=4,label='SDSS')
plot(self.ra[self.HIflag],self.dec[self.HIflag],'bo', markerfacecolor='None',markeredgecolor='b',markersize=6,label='HI')
plot(self.ra[self.apexflag],self.dec[self.apexflag],'ro', markerfacecolor='r',markeredgecolor='b',markersize=4,label='24um')
#plot(ra[flag],dec[flag],'k.')
plot(array([self.clusterra]),array([self.clusterdec]),'kx',markersize=15,lw=8)#mark cluster center with a red x
#legend(loc='upper right',numpoints=1)
title(self.clustername,fontsize=12)
#axis([groupra[i]+dr,groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
axis('equal')
drawbox(cluster24Box[self.clustername],'g-')
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax+1,2,'i'),fontsize=10)
ymin,ymax=ylim()
yticks(arange(round(ymin),ymax+1,2,'i'),fontsize=10)
#axis(groupra[i]+dr,[groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#s=self.clustername+'.eps'
#savefig(s)
def plotpositionson24(self):
plot(self.ra[self.sdssflag & self.On24ImageFlag],self.dec[self.sdssflag& self.On24ImageFlag],'k.', alpha=0.5,markersize=4,label='SDSS')
plot(self.ra[self.HIflag & self.On24ImageFlag],self.dec[self.HIflag & self.On24ImageFlag],'bo', markerfacecolor='None',markeredgecolor='b',markersize=6,label='HI')
plot(self.ra[self.apexflag],self.dec[self.apexflag],'ro', markerfacecolor='r',markeredgecolor='b',markersize=4,label='24um')
#plot(ra[flag],dec[flag],'k.')
plot(array([self.clusterra]),array([self.clusterdec]),'kx',markersize=15,lw=8)#mark cluster center with a red x
#legend(loc='upper right',numpoints=1)
title(self.clustername,fontsize=12)
#axis([groupra[i]+dr,groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#axis('equal')
drawbox(cluster24Box[self.clustername],'g-')
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax,1,'i'),fontsize=10)
ymin,ymax=ylim()
yticks(arange(round(ymin),ymax,1,'i'),fontsize=10)
#axis(groupra[i]+dr,[groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#s=self.clustername+'.eps'
#savefig(s)
def plotrelativepositionson24(self):
plot(self.ra[self.sdssflag & self.On24ImageFlag]-self.clusterra,self.dec[self.sdssflag& self.On24ImageFlag]-self.clusterdec,'k.', alpha=0.5,markersize=4,label='SDSS')
plot(self.ra[self.HIflag & self.On24ImageFlag]-self.clusterra,self.dec[self.HIflag & self.On24ImageFlag]-self.clusterdec,'bo', markerfacecolor='None',markeredgecolor='b',markersize=6,label='HI')
plot(self.ra[self.apexflag]-self.clusterra,self.dec[self.apexflag]-self.clusterdec,'ro', markerfacecolor='r',markeredgecolor='b',markersize=4,label='24um')
#plot(ra[flag],dec[flag],'k.')
plot(array([0]),array([0]),'kx',markersize=15,lw=8,label='_nolegend_')#mark cluster center with a red x
#legend(loc='upper right',numpoints=1)
title(self.clustername,fontsize=12)
#axis([groupra[i]+dr,groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#axis('equal')
drawbox(cluster24Box[self.clustername]-array([self.clusterra,self.clusterdec,0,0,0]),'g-')
axis([-1.5,1.5,-2.,2.])
xmin,xmax=xlim()
xticks(arange(-1,2,1,'i'),fontsize=10)
ymin,ymax=ylim()
yticks(arange(-2,3,1,'i'),fontsize=10)
#axis(groupra[i]+dr,[groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#s=self.clustername+'.eps'
#savefig(s)
def plotveldron24(self):
dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
dv=self.supervopt-self.biweightvel
membflag=(dr/self.r200deg < 1) & (abs(dv) < 3.*self.biweightscale)
plot(dr[self.On24ImageFlag],self.supervopt[self.On24ImageFlag],'k.',markersize=3)
plot(dr[membflag & self.On24ImageFlag],self.supervopt[membflag & self.On24ImageFlag],'bo',markersize=4)
ymin=3500
ymax=14000
axis([0,2,ymin,ymax])
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax+1,1,'i'),fontsize=10)
yticks(arange(4000,ymax,4000,'i'),fontsize=10)
title(self.clustername,fontsize=12)
axhline(self.biweightvel,ls='-',color='r')
axhline(self.biweightvel+3*self.biweightscale,ls='--',color='r')
axhline(self.biweightvel-3*self.biweightscale,ls='--',color='r')
def plotveldr(self):
dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
dv=self.supervopt-self.biweightvel
membflag=(dr/self.r200deg < 1) & (abs(dv) < 3.*self.biweightscale)
plot(dr,self.supervopt,'k.',markersize=3)
plot(dr[membflag],self.supervopt[membflag],'bo',markersize=6)
ymin=3500
ymax=14000
axis([0,3,ymin,ymax])
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax+1,1,'i'),fontsize=10)
yticks(arange(4000,ymax,4000,'i'),fontsize=10)
title(self.clustername,fontsize=12)
axhline(self.biweightvel,ls='-',color='r')
axhline(self.biweightvel+3*self.biweightscale,ls='--',color='r')
axhline(self.biweightvel-3*self.biweightscale,ls='--',color='r')
def plotvelhist(self):
figure()
bins=30
x1=self.allvelocity
(yhist,xhist,patches)=hist(x1,bins)
xhist=xhist[0:len(xhist)-1]+0.5*(xhist[1]-xhist[0])
mymean= average(x1)
mystd=std(x1)
print mystd
norm=max(yhist)
xmin=3000
xmax=15000
xplot=arange(xmin,xmax,50)
y1=norm*exp(-((xplot -self.clustervel)**2)/(2*self.clustersigma**2))
plot(xplot,y1,'r-')
xlabel('Recession Velocity ')
xlim(xmin,xmax)
axvline(self.clustervel,ymin=0,ymax=60,color='r')
def plotlf(self):
figure(1)
y=hist(self.f24dist,histtype='step')
ngal=y[0]
x=y[1]
xbin=zeros(len(ngal))
for i in range(len(xbin)):
xbin[i]=0.5*(x[i]+x[i+1])
#clf()
self.xbin=xbin
self.ngal=ngal
figure(2)
plot(xbin,ngal,'ro')
errorbar(xbin,ngal,sqrt(ngal))
ax=gca()
ax.set_yscale('log')
ax.set_xscale('log')
xlabel('24um Flux')
    def checkmorph(self):#print out galaxies where our morphological class and the Burstein type disagree, or where no Burstein type exists
flag=(self.morph==3)
self.summerSpirals=self.agcnumber[flag]
bflag=((self.bsteintype>=120)&(self.bsteintype<183))|((self.bsteintype>=300)&(self.bsteintype<400))
self.bflag=bflag
funnyflag= ((~(flag) & bflag) | (flag & ~bflag)) & self.morphflag & (self.bsteintype > 0)
self.funnySpirals=self.agcnumber[funnyflag]
self.ourtype=self.morph[funnyflag]
self.theirtype=self.bsteintype[funnyflag]
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.funnySpirals.dat'
outfile=open(s,'w')
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.funnySpirals'
outfile2=open(s,'w')
for i in range(len(self.funnySpirals)):
if self.prefix in 'Coma Hercules A1367':
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss.fits \n'
else:
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss-g.fits \n'
outfile.write(name)
name=str(self.funnySpirals[i])+' '+str(self.ourtype[i])+' '+str(self.theirtype[i])+' \n'
outfile2.write(name)
outfile.close()
outfile2.close()
nobflag= ((self.bsteintype == 0) & (self.On24ImageFlag))
self.funnySpirals=self.agcnumber[nobflag]
self.ourtype=self.morph[nobflag]
self.theirtype=self.bsteintype[nobflag]
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noBsteinSpirals.dat'
outfile=open(s,'w')
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noBsteinSpirals'
outfile2=open(s,'w')
for i in range(len(self.funnySpirals)):
if self.prefix in 'Coma Hercules A1367':
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss.fits \n'
else:
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss-g.fits \n'
outfile.write(name)
name=str(self.funnySpirals[i])+' '+str(self.ourtype[i])+' '+str(self.theirtype[i])+' \n'
outfile2.write(name)
outfile.close()
outfile2.close()
missmorphflag= ((self.morphflag == 0) & (self.On24ImageFlag))
self.funnySpirals=self.agcnumber[missmorphflag]
self.ourtype=self.morph[missmorphflag]
self.theirtype=self.bsteintype[missmorphflag]
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noMorph.dat'
outfile=open(s,'w')
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noMorph'
outfile2=open(s,'w')
for i in range(len(self.funnySpirals)):
if self.prefix in 'Coma Hercules A1367':
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss.fits \n'
else:
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss-g.fits \n'
outfile.write(name)
name=str(self.funnySpirals[i])+' '+str(self.ourtype[i])+' '+str(self.theirtype[i])+' \n'
outfile2.write(name)
outfile.close()
outfile2.close()
def fitprofiles(self):
#get list from LCSreadmaster.py
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.sdss.dat'
infile1=open(inf1,'r')
sfiles=[]
for line in infile1:
t=line.rstrip()
sfiles.append(t)
#print t
infile1.close()
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.24.dat'
infile1=open(inf1,'r')
s24files=[]
for line in infile1:
t=line.rstrip()
s24files.append(t)
#print t
infile1.close()
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.sdss.dat'
infile1=open(inf1,'r')
simages=[]
for line in infile1:
t=line.rstrip()
simages.append(t)
#print t
infile1.close()
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.24.dat'
infile1=open(inf1,'r')
s24images=[]
for line in infile1:
t=line.rstrip()
s24images.append(t)
#print t
infile1.close()
pscale24=2.45#arcsec per pixel
pscalesdss=1.#arcsec per pixel
nrow=2
ncol=4
xticksize=10
yticksize=10
#for i in range(0,len(sfiles),nrow):
ngal=0
ngaltot=1.*len(sfiles)
        #one figure per galaxy, so the number of pages equals the number of galaxies
        npage=ngaltot
#print "Ngal = ",ngaltot
#print "Npage = ",npage
redshift=(self.supervopt[self.spiralFlag]-self.clustervel)/self.clustersigma
member=self.flagmemb[self.spiralFlag]
dr=self.drR200[self.spiralFlag]
index=arange(len(self.spiralFlag))
spiralIndex=index[self.spiralFlag]
vminsdss=-400
vmaxsdss=100
vmin24=-2.1
vmax24=.5
self.r0SDSS=zeros(len(spiralIndex),'f')#scale length from exponential fit
self.r30SDSS=zeros(len(spiralIndex),'f')
self.r90SDSS=zeros(len(spiralIndex),'f')
self.skySDSS=zeros(len(spiralIndex),'f')
self.r30EncFluxSDSS=zeros(len(spiralIndex),'f')
self.r90EncFluxSDSS=zeros(len(spiralIndex),'f')
self.MaxEncFluxSDSS=zeros(len(spiralIndex),'f')
#same array for 24um
self.r0F24=zeros(len(spiralIndex),'f')#scale length from exponential fit
self.r30F24=zeros(len(spiralIndex),'f')
self.r90F24=zeros(len(spiralIndex),'f')
self.skyF24=zeros(len(spiralIndex),'f')
self.r30EncFluxF24=zeros(len(spiralIndex),'f')
self.r90EncFluxF24=zeros(len(spiralIndex),'f')
self.MaxEncFluxF24=zeros(len(spiralIndex),'f')
for i in range(int(npage)):
figure(figsize=(15,5))
subplots_adjust(left=0.05, right=.95,bottom=.1,top=0.9,wspace=0.4,hspace=0.6)
clf()
print 'ngal = ',ngal
for j in range(0,ncol*nrow,8):
t=sfiles[ngal]
t1=t.split('/')
t2=t1[len(t1)-1].split('-')
if len(t2) > 4:
galname='-'+t2[2]
else:
galname=t2[1]
t=s24files[ngal]
t1=t.split('/')
t2=t1[len(t1)-1].split('-')
if len(t2) > 5:
galname24='-'+t2[2]
else:
galname24=t2[1]
subplot(nrow,ncol,j+1)#sdss image
fits=pyfits.open(simages[ngal])
im=fits[0].data.copy()
fits.close()
axis('equal')
imshow(-1.*(im),interpolation='nearest',origin='upper',vmin=vminsdss,vmax=vmaxsdss,cmap=cm.Greys)
ax=gca()
ax.set_yticklabels(([]))
ax.set_xticklabels(([]))
size='100\"'
#text(.92, .5, size, horizontalalignment='center', verticalalignment='center',rotation=270, transform=ax.transAxes,fontsize=10)
#text(.1, .5, s, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
#text(.1, .5, s, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
s='$'+self.prefix+': \ '+galname+'$'
title(s,fontsize=12)
s='$ r-band$'
ylabel(s,fontsize=14)
xlabel(r'$100 X 100 \ arcsec^2$',fontsize=10)
#ylabel('$100 \ arcsec$')
## subplot(nrow,ncol,j+2)#sdss masked image
## fits=pyfits.open(simages[ngal])
## im=fits[0].data.copy()
## fits.close()
## axis('equal')
## imshow(-1.*(im),interpolation='nearest',origin='upper')#,cmap='binary')#,vmin=myvmin,vmax=myvmax,cmap=cm.Greys)
## ax=gca()
## ax.set_yticklabels(([]))
## ax.set_xticklabels(([]))
## text(.9, .5, galname, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
subplot(nrow,ncol,j+3)#sdss profile
edat=load(sfiles[ngal],usecols=[1,2,3,21,40])
x=edat[:,0]
y=edat[:,1]
yerr=edat[:,2]
tflux=edat[:,3]
sarea=edat[:,4]
plot(x,y,'b.')
errorbar(x,y,yerr,fmt=None)
r90,sky,r90EncFlux,MaxEncFlux,r30,r30EncFlux=calcC90(x,y,yerr,tflux)
self.r30SDSS[ngal]=r30
self.r90SDSS[ngal]=r90
self.skySDSS[ngal]=sky
self.r30EncFluxSDSS[ngal]=r30EncFlux
self.r90EncFluxSDSS[ngal]=r90EncFlux
self.MaxEncFluxSDSS[ngal]=MaxEncFlux
axhline(sky,color='k',ls=':')
axvline(r90,color='k',ls='--')
axvline(r30,color='c',ls='--')
xlabel('$r \ (arcsec)$')
ylabel('$I(r)$')
s='$sky = %5.2f, \ r30 = %5.1f, \ r90 = %5.1f$'%(sky,r30,r90)
title(s,fontsize=10)
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
#text(.1, .5, s, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
subplot(nrow,ncol,j+2)
plot(x,tflux,'b.')
#plot(x,y*sarea,'r.')
axhline(MaxEncFlux,color='k',ls=':')
axvline(r90EncFlux,color='k',ls='--')
axvline(r30EncFlux,color='c',ls='--')
s='$max(F_{enc}) = %5.2e, \ r30 = %5.1f, \ r90 = %5.1f$'%(MaxEncFlux,r30EncFlux,r90EncFlux)
title(s,fontsize=10)
xlabel('$r \ (arcsec)$')
ylabel('$\Sigma Flux(<r)$')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
xlim(0,50)
subplot(nrow,ncol,j+4)#sdss ln profile with fit
xfit=(x[y>5])
yfit=log(y[y>5])
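                #for an exponential disk I(r)=I0*exp(-r/R0), ln(I) is linear
                #in r with slope -1/R0, so the fitted slope m gives R0=-1./m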
m,b=polyfit(xfit,yfit,1)
plot(xfit,yfit-b,'r^')
xlabel('$r (arcsec)$')
ylabel(r'$ln(I(r))-ln(I_0)$')
#gradient, intercept, r_value, p_value, std_err = stats.linregress(xfit,yfit)
plot(xfit,m*xfit,'g')
axvline(-1./m,color='k',ls='--')
axhline(-1,color='k',ls=':')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
s='$R_0 = %5.1f$'%(-1./m)
title(s, fontsize=10)
self.r0SDSS[ngal]=-1./m
                subplot(nrow,ncol,j+5)#24um image
fits=pyfits.open(s24images[ngal])
im=fits[0].data.copy()
fits.close()
axis('equal')
imshow(-1.*(im),interpolation='nearest',origin='upper',vmin=vmin24,vmax=vmax24,cmap=cm.Greys)
ax=gca()
ax.set_yticklabels(([]))
ax.set_xticklabels(([]))
#text(.9, .5, galname, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
ylabel('$24 \ \mu m$',fontsize=14)
s1='$\Delta v/\sigma = %5.2f, \ \Delta r/R_{200} = %5.2f$'%(redshift[ngal],dr[ngal])
title(s1,fontsize=10)
xlabel(r'$100 X 100 \ arcsec^2$',fontsize=10)
## subplot(nrow,ncol,j+6)#sdss masked image
## fits=pyfits.open(s24images[ngal])
## im=fits[0].data.copy()
## fits.close()
## axis('equal')
## imshow(-1.*(im),interpolation='nearest',origin='upper')#,cmap='binary')#,vmin=myvmin,vmax=myvmax,cmap=cm.Greys)
## ax=gca()
## ax.set_yticklabels(([]))
## ax.set_xticklabels(([]))
## text(.9, .5, galname, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
                subplot(nrow,ncol,j+7)#24um profile
edat=load(s24files[ngal],usecols=[1,2,3,21])
x=edat[:,0]
y=edat[:,1]
yerr=edat[:,2]
tflux=edat[:,3]
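                # pscale24 is presumably the 24um pixel scale in arcsec/pixel; radii are
                # converted to arcsec below so they match the SDSS panels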
plot(x*pscale24,y,'b.')
errorbar(x*pscale24,y,yerr,fmt=None)
r90,sky,r90EncFlux,MaxEncFlux,r30,r30EncFlux=calcC90(x*pscale24,y,yerr,tflux)
self.r30F24[ngal]=r30
self.r90F24[ngal]=r90
self.skyF24[ngal]=sky
self.r30EncFluxF24[ngal]=r30EncFlux
self.r90EncFluxF24[ngal]=r90EncFlux
self.MaxEncFluxF24[ngal]=MaxEncFlux
xlabel('$r \ (arcsec)$')
ylabel('$I(r)$')
s='$sky = %5.2f, \ r30 = %5.1f, \ r90 = %5.1f$'%(sky,r30,r90)
title(s,fontsize=10)
axhline(sky,color='k',ls=':')
axvline(r90,color='k',ls='--')
axvline(r30,color='c',ls='--')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
xlim(0,50)
                subplot(nrow,ncol,j+6)#24um enclosed flux vs radius
plot(x*pscale24,tflux,'b.')
axhline(MaxEncFlux,color='k',ls=':')
axvline(r90EncFlux,color='k',ls='--')
axvline(r30EncFlux,color='c',ls='--')
xlabel('$r \ (arcsec)$')
ylabel('$\Sigma Flux(<r)$')
s='$max(F_{enc}) = %5.2e, \ r30 = %5.1f, \ r90 = %5.1f$'%(MaxEncFlux,r30EncFlux,r90EncFlux)
title(s,fontsize=10)
xlim(0,50)
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
                subplot(nrow,ncol,j+8)#24um ln profile with fit
                xfit=(x[y>.05])
                yfit=log(y[y>.05])
                if len(xfit) > 1:
                    m,b=polyfit(xfit*pscale24,yfit,1)
                    plot(xfit*pscale24,m*xfit*pscale24,'g')
                    plot(xfit*pscale24,yfit-b,'r^')
                else:
                    plot(xfit*pscale24,yfit,'r^')
                xlabel('$r (arcsec)$')
                ylabel(r'$ln(I(r))-ln(I_0)$')
                #gradient, intercept, r_value, p_value, std_err = stats.linregress(xfit,yfit)
                if len(xfit) > 1:
                    # only annotate and record R_0 when the fit above succeeded; previously
                    # m could be stale (left over from the r-band fit) when len(xfit) <= 1
                    axvline(-1./m,color='k',ls='--')
                    s='$R_0 = %5.1f$'%(-1./m)
                    self.r0F24[ngal]=-1./m
                else:
                    s='$R_0$ fit failed'
                axhline(-1,color='k',ls=':')
                xticks(fontsize=xticksize)
                yticks(fontsize=yticksize)
                title(s, fontsize=10)
ngal += 1
if ngal >= ngaltot:
figname=self.prefix+'Profiles'+str(galname)+'.png'
savefig(figname)
break
figname=self.prefix+'Profiles'+str(galname)+'.png'
savefig(figname)
mypath=os.getcwd()
if mypath.find('Users') > -1:
    print "Running on Rose's mac pro"
    homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
    print "Running on coma"
    homedir='/home/rfinn/'
else:
    # fall back to the user's home directory so homedir is always defined
    print "Unrecognized host; defaulting homedir to ~/"
    homedir=os.path.expanduser('~')+'/'
mkw11=cluster('MKW11')
mkw8=cluster('MKW8')
awm4=cluster('AWM4')
a2052=cluster('A2052')
a2063=cluster('A2063')
ngc=cluster('NGC6107')
coma=cluster('Coma')
herc=cluster('Hercules')
a1367=cluster('A1367')
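# Convenience list of the cluster objects above; clusterlist[i-1] corresponds to
# panel i=1..9 in the plotting loops below (a small refactor of the repeated
# if-chains that previously selected the cluster).
clusterlist=[mkw11,mkw8,awm4,ngc,a2052,a2063,coma,herc,a1367]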
def plotpositionsall():
figure()
clf()
subplots_adjust(wspace=.25,hspace=.35)
    for i in range(1,10):
        cl=clusterlist[i-1]
subplot(3,3,i)
cl.plotpositions()
ax=gca()
text(-.75,-.35,'RA (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-2.8,1.9,'Dec (deg)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotPositionsAll.eps')
def plotpositionson24all():
figure()
clf()
subplots_adjust(wspace=.25,hspace=.35)
    for i in range(1,10):
        cl=clusterlist[i-1]
subplot(3,3,i)
cl.plotpositionson24()
ax=gca()
text(-.75,-.35,'RA (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-2.8,1.9,'Dec (deg)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotPositionsOn24All.eps')
def plotrelativepositionson24all():
figure(figsize=[9,9])
clf()
subplots_adjust(wspace=.25,hspace=.35)
    for i in range(1,10):
        cl=clusterlist[i-1]
subplot(3,3,i)
cl.plotrelativepositionson24()
leg=legend(numpoints=1)#,fontsize=12)
for t in leg.get_texts():
t.set_fontsize('small')
ax=gca()
text(-.75,-.35,'$\Delta$RA (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-2.8,1.9,'$\Delta$Dec (deg)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotRelativePositionsOn24All.eps')
def plotveldrall():
figure(figsize=[9,6])
clf()
subplots_adjust(wspace=.35,hspace=.35)
    for i in range(1,10):
        cl=clusterlist[i-1]
subplot(3,3,i)
cl.plotveldr()
ax=gca()
text(-.75,-.35,'dr (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-3.1,1.9,'$V_r$ (km/s)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotVeldrAll.eps')
def plotveldron24all():
figure(figsize=[9,9])
clf()
subplots_adjust(wspace=.35,hspace=.35)
    for i in range(1,10):
        cl=clusterlist[i-1]
subplot(3,3,i)
cl.plotveldron24()
ax=gca()
text(-.75,-.35,'$\Delta$r (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-3.1,1.9,'$V_r$ (km/s)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotVeldrAllOn24.eps')
def checkmorphall():
    for i in range(1,10):
        cl=clusterlist[i-1]
cl.checkmorph()
def getSpirals():
    for i in range(1,10):
        cl=clusterlist[i-1]
cl.getFilesForProfileFitting()
#plotpositionsall()
#plotpositionson24all()
#plotveldrall()
#getSpirals()
#mkw11.fitprofiles()
| gpl-3.0 | 1,974,420,544,544,963,800 | 37.925926 | 202 | 0.580919 | false |
Endika/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/test_mixed_modulestore.py | 7 | 136521 | """
Unit tests for the Mixed Modulestore, with DDT for the various stores (Split, Draft, XML)
"""
from collections import namedtuple
import datetime
import logging
import ddt
import itertools
import mimetypes
from uuid import uuid4
from contextlib import contextmanager
from mock import patch, Mock, call
# Mixed modulestore depends on django, so we'll manually configure some django settings
# before importing the module
# TODO remove this import and the configuration -- xmodule should not depend on django!
from django.conf import settings
# This import breaks this test file when run separately. Needs to be fixed! (PLAT-449)
from nose.plugins.attrib import attr
import pymongo
from pytz import UTC
from shutil import rmtree
from tempfile import mkdtemp
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.tests.utils import MongoContentstoreBuilder
from xmodule.contentstore.content import StaticContent
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml
if not settings.configured:
settings.configure()
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator, LibraryLocator
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import UnsupportedRevisionError, DIRECT_ONLY_CATEGORIES
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError, ReferentialIntegrityError, NoPathToItem
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.search import path_to_location, navigation_index
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.modulestore.tests.factories import check_mongo_calls, check_exact_number_of_calls, \
mongo_uses_error_check
from xmodule.modulestore.tests.utils import create_modulestore_instance, LocationMixin, mock_tab_from_json
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.tests import DATA_DIR, CourseComparisonTest
log = logging.getLogger(__name__)
class CommonMixedModuleStoreSetup(CourseComparisonTest):
"""
    Quasi-superclass which tests Location-based apps against both split and mongo dbs
    (Locator- and Location-based dbs)
"""
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
COLLECTION = 'modulestore'
ASSET_COLLECTION = 'assetstore'
FS_ROOT = DATA_DIR
DEFAULT_CLASS = 'xmodule.raw_module.RawDescriptor'
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': ''
MONGO_COURSEID = 'MITx/999/2013_Spring'
XML_COURSEID1 = 'edX/toy/2012_Fall'
XML_COURSEID2 = 'edX/simple/2012_Fall'
BAD_COURSE_ID = 'edX/simple'
modulestore_options = {
'default_class': DEFAULT_CLASS,
'fs_root': DATA_DIR,
'render_template': RENDER_TEMPLATE,
'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
}
DOC_STORE_CONFIG = {
'host': HOST,
'port': PORT,
'db': DB,
'collection': COLLECTION,
'asset_collection': ASSET_COLLECTION,
}
MAPPINGS = {
XML_COURSEID1: 'xml',
XML_COURSEID2: 'xml',
BAD_COURSE_ID: 'xml',
}
OPTIONS = {
'stores': [
{
'NAME': ModuleStoreEnum.Type.mongo,
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': ModuleStoreEnum.Type.split,
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': ModuleStoreEnum.Type.xml,
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'xblock_mixins': modulestore_options['xblock_mixins'],
}
},
],
'xblock_mixins': modulestore_options['xblock_mixins'],
}
def _compare_ignore_version(self, loc1, loc2, msg=None):
"""
AssertEqual replacement for CourseLocator
"""
if loc1.for_branch(None) != loc2.for_branch(None):
self.fail(self._formatMessage(msg, u"{} != {}".format(unicode(loc1), unicode(loc2))))
def setUp(self):
"""
Set up the database for testing
"""
super(CommonMixedModuleStoreSetup, self).setUp()
self.exclude_field(None, 'wiki_slug')
self.exclude_field(None, 'xml_attributes')
self.exclude_field(None, 'parent')
self.ignore_asset_key('_id')
self.ignore_asset_key('uploadDate')
self.ignore_asset_key('content_son')
self.ignore_asset_key('thumbnail_location')
self.options = getattr(self, 'options', self.OPTIONS)
self.connection = pymongo.MongoClient(
host=self.HOST,
port=self.PORT,
tz_aware=True,
)
self.connection.drop_database(self.DB)
self.addCleanup(self.connection.drop_database, self.DB)
self.addCleanup(self.connection.close)
self.addTypeEqualityFunc(BlockUsageLocator, '_compare_ignore_version')
self.addTypeEqualityFunc(CourseLocator, '_compare_ignore_version')
# define attrs which get set in initdb to quell pylint
self.writable_chapter_location = self.store = self.fake_location = self.xml_chapter_location = None
self.course_locations = {}
self.user_id = ModuleStoreEnum.UserID.test
# pylint: disable=invalid-name
def _create_course(self, course_key):
"""
        Create a course in the persistence store with one chapter, using the given course key.
"""
# create course
with self.store.bulk_operations(course_key):
self.course = self.store.create_course(course_key.org, course_key.course, course_key.run, self.user_id)
if isinstance(self.course.id, CourseLocator):
self.course_locations[self.MONGO_COURSEID] = self.course.location
else:
self.assertEqual(self.course.id, course_key)
# create chapter
chapter = self.store.create_child(self.user_id, self.course.location, 'chapter', block_id='Overview')
self.writable_chapter_location = chapter.location
def _create_block_hierarchy(self):
"""
Creates a hierarchy of blocks for testing
Each block's (version_agnostic) location is assigned as a field of the class and can be easily accessed
"""
BlockInfo = namedtuple('BlockInfo', 'field_name, category, display_name, sub_tree')
trees = [
BlockInfo(
'chapter_x', 'chapter', 'Chapter_x', [
BlockInfo(
'sequential_x1', 'sequential', 'Sequential_x1', [
BlockInfo(
'vertical_x1a', 'vertical', 'Vertical_x1a', [
BlockInfo('problem_x1a_1', 'problem', 'Problem_x1a_1', []),
BlockInfo('problem_x1a_2', 'problem', 'Problem_x1a_2', []),
BlockInfo('problem_x1a_3', 'problem', 'Problem_x1a_3', []),
BlockInfo('html_x1a_1', 'html', 'HTML_x1a_1', []),
]
),
BlockInfo(
'vertical_x1b', 'vertical', 'Vertical_x1b', []
)
]
),
BlockInfo(
'sequential_x2', 'sequential', 'Sequential_x2', []
)
]
),
BlockInfo(
'chapter_y', 'chapter', 'Chapter_y', [
BlockInfo(
'sequential_y1', 'sequential', 'Sequential_y1', [
BlockInfo(
'vertical_y1a', 'vertical', 'Vertical_y1a', [
BlockInfo('problem_y1a_1', 'problem', 'Problem_y1a_1', []),
BlockInfo('problem_y1a_2', 'problem', 'Problem_y1a_2', []),
BlockInfo('problem_y1a_3', 'problem', 'Problem_y1a_3', []),
]
)
]
)
]
)
]
def create_sub_tree(parent, block_info):
"""
recursive function that creates the given block and its descendants
"""
block = self.store.create_child(
self.user_id, parent.location,
block_info.category, block_id=block_info.display_name,
fields={'display_name': block_info.display_name},
)
for tree in block_info.sub_tree:
create_sub_tree(block, tree)
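            # expose each created block's location as a test attribute, e.g. self.problem_x1a_1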
setattr(self, block_info.field_name, block.location)
with self.store.bulk_operations(self.course.id):
for tree in trees:
create_sub_tree(self.course, tree)
def _course_key_from_string(self, string):
"""
Get the course key for the given course string
"""
return self.course_locations[string].course_key
def _has_changes(self, location):
"""
Helper function that loads the item before calling has_changes
"""
return self.store.has_changes(self.store.get_item(location))
# pylint: disable=dangerous-default-value
def _initialize_mixed(self, mappings=MAPPINGS, contentstore=None):
"""
initializes the mixed modulestore.
"""
self.store = MixedModuleStore(
contentstore, create_modulestore_instance=create_modulestore_instance,
mappings=mappings,
**self.options
)
self.addCleanup(self.store.close_all_connections)
def initdb(self, default):
"""
Initialize the database and create one test course in it
"""
# set the default modulestore
store_configs = self.options['stores']
        for index, store_config in enumerate(store_configs):
            if store_config['NAME'] == default:
                if index > 0:
                    store_configs[index], store_configs[0] = store_configs[0], store_configs[index]
                break
self._initialize_mixed()
# convert to CourseKeys
self.course_locations = {
course_id: CourseLocator.from_string(course_id)
for course_id in [self.MONGO_COURSEID, self.XML_COURSEID1, self.XML_COURSEID2]
}
# and then to the root UsageKey
self.course_locations = {
course_id: course_key.make_usage_key('course', course_key.run)
for course_id, course_key in self.course_locations.iteritems()
}
mongo_course_key = self.course_locations[self.MONGO_COURSEID].course_key
        self.fake_location = self.store.make_course_key(
            mongo_course_key.org, mongo_course_key.course, mongo_course_key.run
        ).make_usage_key('vertical', 'fake')
self.xml_chapter_location = self.course_locations[self.XML_COURSEID1].replace(
category='chapter', name='Overview'
)
self._create_course(self.course_locations[self.MONGO_COURSEID].course_key)
        self.assertEqual(default, self.store.get_modulestore_type(self.course.id))
@ddt.ddt
@attr('mongo')
class TestMixedModuleStore(CommonMixedModuleStoreSetup):
"""
Tests of the MixedModulestore interface methods.
"""
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_modulestore_type(self, default_ms):
"""
Make sure we get back the store type we expect for given mappings
"""
self.initdb(default_ms)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.XML_COURSEID1)), ModuleStoreEnum.Type.xml
)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.XML_COURSEID2)), ModuleStoreEnum.Type.xml
)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.MONGO_COURSEID)), default_ms
)
# try an unknown mapping, it should be the 'default' store
self.assertEqual(self.store.get_modulestore_type(
SlashSeparatedCourseKey('foo', 'bar', '2012_Fall')), default_ms
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_modulestore_cache(self, default_ms):
"""
Make sure we cache discovered course mappings
"""
self.initdb(default_ms)
# unset mappings
self.store.mappings = {}
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with check_exact_number_of_calls(self.store.default_modulestore, 'has_course', 1):
self.assertEqual(self.store.default_modulestore, self.store._get_modulestore_for_courselike(course_key)) # pylint: disable=protected-access
self.assertIn(course_key, self.store.mappings)
self.assertEqual(self.store.default_modulestore, self.store._get_modulestore_for_courselike(course_key)) # pylint: disable=protected-access
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False)
))
@ddt.unpack
def test_duplicate_course_error(self, default_ms, reset_mixed_mappings):
"""
Make sure we get back the store type we expect for given mappings
"""
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
if reset_mixed_mappings:
self.store.mappings = {}
with self.assertRaises(DuplicateCourseError):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
# Draft:
# problem: One lookup to locate an item that exists
# fake: one w/ wildcard version
# split has one lookup for the course and then one for the course items
@ddt.data((ModuleStoreEnum.Type.mongo, [1, 1], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_has_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
self.assertTrue(self.store.has_item(self.course_locations[self.XML_COURSEID1]))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertTrue(self.store.has_item(self.problem_x1a_1))
# try negative cases
self.assertFalse(self.store.has_item(
self.course_locations[self.XML_COURSEID1].replace(name='not_findable', category='problem')
))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertFalse(self.store.has_item(self.fake_location))
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.has_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# draft queries:
# problem: find draft item, find all items pertinent to inheritance computation, find parent
# non-existent problem: find draft, find published
# split:
# problem: active_versions, structure
# non-existent problem: ditto
@ddt.data((ModuleStoreEnum.Type.mongo, [3, 2], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_get_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
self.assertIsNotNone(self.store.get_item(self.course_locations[self.XML_COURSEID1]))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertIsNotNone(self.store.get_item(self.problem_x1a_1))
# try negative cases
with self.assertRaises(ItemNotFoundError):
self.store.get_item(
self.course_locations[self.XML_COURSEID1].replace(name='not_findable', category='problem')
)
with check_mongo_calls(max_find.pop(0), max_send):
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.fake_location)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.get_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Draft:
    # wildcard query, then several loads of pertinent items for inheritance calls,
    # loads of parents, and a course root fetch (why?)
# Split:
# active_versions (with regex), structure, and spurious active_versions refetch
@ddt.data((ModuleStoreEnum.Type.mongo, 14, 0), (ModuleStoreEnum.Type.split, 3, 0))
@ddt.unpack
def test_get_items(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
course_locn = self.course_locations[self.XML_COURSEID1]
# NOTE: use get_course if you just want the course. get_items is expensive
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'course'})
self.assertEqual(len(modules), 1)
self.assertEqual(modules[0].location, course_locn)
course_locn = self.course_locations[self.MONGO_COURSEID]
with check_mongo_calls(max_find, max_send):
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'problem'})
self.assertEqual(len(modules), 6)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.get_items(
self.course_locations[self.MONGO_COURSEID].course_key,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
@ddt.data((ModuleStoreEnum.Type.split, 2, False), (ModuleStoreEnum.Type.mongo, 3, True))
@ddt.unpack
def test_get_items_include_orphans(self, default_ms, expected_items_in_tree, orphan_in_items):
"""
        Test that the `include_orphans` option restricts `get_items` to items that
        are present in the course tree, i.e. that orphans are not fetched when
        calling `get_items` with `include_orphans=False`.

        Params:
            expected_items_in_tree:
                Number of items returned when `get_items` is called with
                `include_orphans=False`. In split, orphan items are excluded.
                In mongo, orphans are still returned because `include_orphans`
                has no effect on the mongo modulestore, which returns the same
                number of items as a call without the kwarg.
            orphan_in_items:
                Whether an orphan is expected in the result of `get_items` with
                `include_orphans=False`: False for split, where orphans are now
                excluded; True for mongo, where `include_orphans` has no effect.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
course_key = test_course.id
items = self.store.get_items(course_key)
# Check items found are either course or about type
self.assertTrue(set(['course', 'about']).issubset(set([item.location.block_type for item in items])))
# Assert that about is a detached category found in get_items
self.assertIn(
[item.location.block_type for item in items if item.location.block_type == 'about'][0],
DETACHED_XBLOCK_TYPES
)
self.assertEqual(len(items), 2)
# Check that orphans are not found
orphans = self.store.get_orphans(course_key)
self.assertEqual(len(orphans), 0)
# Add an orphan to test course
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(self.user_id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
# Check that now an orphan is found
orphans = self.store.get_orphans(course_key)
self.assertIn(orphan, orphans)
self.assertEqual(len(orphans), 1)
        # Check that `get_items` now also retrieves the extra item added above, which is an orphan.
items = self.store.get_items(course_key)
self.assertIn(orphan, [item.location for item in items])
self.assertEqual(len(items), 3)
        # Check how `get_items` with `include_orphans=False` treats the orphan block.
items_in_tree = self.store.get_items(course_key, include_orphans=False)
# Check that course and about blocks are found in get_items
self.assertTrue(set(['course', 'about']).issubset(set([item.location.block_type for item in items_in_tree])))
# Check orphan is found or not - this is based on mongo/split modulestore. It should be found in mongo.
self.assertEqual(orphan in [item.location for item in items_in_tree], orphan_in_items)
self.assertEqual(len(items_in_tree), expected_items_in_tree)
# draft: get draft, get ancestors up to course (2-6), compute inheritance
# sends: update problem and then each ancestor up to course (edit info)
# split: active_versions, definitions (calculator field), structures
# 2 sends to update index & structure (note, it would also be definition if a content field changed)
@ddt.data((ModuleStoreEnum.Type.mongo, 7, 5), (ModuleStoreEnum.Type.split, 3, 2))
@ddt.unpack
def test_update_item(self, default_ms, max_find, max_send):
"""
Update should fail for r/o dbs and succeed for r/w ones
"""
self.initdb(default_ms)
self._create_block_hierarchy()
course = self.store.get_course(self.course_locations[self.XML_COURSEID1].course_key)
# if following raised, then the test is really a noop, change it
self.assertFalse(course.show_calculator, "Default changed making test meaningless")
course.show_calculator = True
with self.assertRaises(NotImplementedError): # ensure it doesn't allow writing
self.store.update_item(course, self.user_id)
# now do it for a r/w db
problem = self.store.get_item(self.problem_x1a_1)
# if following raised, then the test is really a noop, change it
self.assertNotEqual(problem.max_attempts, 2, "Default changed making test meaningless")
problem.max_attempts = 2
with check_mongo_calls(max_find, max_send):
problem = self.store.update_item(problem, self.user_id)
self.assertEqual(problem.max_attempts, 2, "Update didn't persist")
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_direct_only(self, default_ms):
"""
Tests that has_changes() returns false when a new xblock in a direct only category is checked
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create dummy direct only xblocks
chapter = self.store.create_item(
self.user_id,
test_course.id,
'chapter',
block_id='vertical_container'
)
# Check that neither xblock has changes
self.assertFalse(self.store.has_changes(test_course))
self.assertFalse(self.store.has_changes(chapter))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes(self, default_ms):
"""
Tests that has_changes() only returns true when changes are present
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
        new_xblock = self.store.publish(xblock.location, self.user_id)
        self.assertFalse(self.store.has_changes(new_xblock))
# Change the component, then check that there now are changes
component = self.store.get_item(xblock.location)
component.display_name = 'Changed Display Name'
component = self.store.update_item(component, self.user_id)
self.assertTrue(self.store.has_changes(component))
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_draft_mode(self, default_ms):
"""
        After revert_to_published(), has_changes() should return False if the draft has no changes.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
component = self.store.publish(xblock.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
self.store.revert_to_published(component.location, self.user_id)
component = self.store.get_item(component.location)
self.assertFalse(self.store.has_changes(component))
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_published_mode(self, default_ms):
"""
        After revert_to_published(), has_changes() should return True if the draft has changes.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
component = self.store.publish(xblock.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
# Discard changes and verify that there are no changes
self.store.revert_to_published(component.location, self.user_id)
component = self.store.get_item(component.location)
self.assertFalse(self.store.has_changes(component))
# Change the component, then check that there now are changes
component = self.store.get_item(component.location)
component.display_name = 'Changed Display Name'
self.store.update_item(component, self.user_id)
# Verify that changes are present
self.assertTrue(self.store.has_changes(component))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_published_mode_after_delete(self, default_ms):
"""
Test that a unit does not get stuck in published mode
after discarding a component changes and deleting a component
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy vertical & html component to test against
vertical = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
component = self.store.create_child(
self.user_id,
vertical.location,
'html',
block_id='html_component'
)
# publish vertical changes
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# Change a component, then check that there now are changes
component = self.store.get_item(component.location)
component.display_name = 'Changed Display Name'
self.store.update_item(component, self.user_id)
self.assertTrue(self._has_changes(vertical.location))
# Discard changes and verify that there are no changes
self.store.revert_to_published(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# Delete the component and verify that the unit has changes
self.store.delete_item(component.location, self.user_id)
vertical = self.store.get_item(vertical.location)
self.assertTrue(self._has_changes(vertical.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_publish_automatically_after_delete_unit(self, default_ms):
"""
        Check that a sequential is published automatically after deleting one of its units.
"""
self.initdb(default_ms)
test_course = self.store.create_course('test_org', 'test_course', 'test_run', self.user_id)
# create sequential and vertical to test against
sequential = self.store.create_child(self.user_id, test_course.location, 'sequential', 'test_sequential')
vertical = self.store.create_child(self.user_id, sequential.location, 'vertical', 'test_vertical')
# publish sequential changes
self.store.publish(sequential.location, self.user_id)
self.assertFalse(self._has_changes(sequential.location))
# delete vertical and check sequential has no changes
self.store.delete_item(vertical.location, self.user_id)
self.assertFalse(self._has_changes(sequential.location))
def setup_has_changes(self, default_ms):
"""
Common set up for has_changes tests below.
Returns a dictionary of useful location maps for testing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
locations = {
'grandparent': self.chapter_x,
'parent_sibling': self.sequential_x2,
'parent': self.sequential_x1,
'child_sibling': self.vertical_x1b,
'child': self.vertical_x1a,
}
# Publish the vertical units
self.store.publish(locations['parent_sibling'], self.user_id)
self.store.publish(locations['parent'], self.user_id)
return locations
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_ancestors(self, default_ms):
"""
Tests that has_changes() returns true on ancestors when a child is changed
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
# Change the child
child = self.store.get_item(locations['child'])
child.display_name = 'Changed Display Name'
self.store.update_item(child, self.user_id)
# All ancestors should have changes, but not siblings
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
self.assertTrue(self._has_changes(locations['child']))
self.assertFalse(self._has_changes(locations['parent_sibling']))
self.assertFalse(self._has_changes(locations['child_sibling']))
# Publish the unit with changes
self.store.publish(locations['parent'], self.user_id)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_publish_ancestors(self, default_ms):
"""
Tests that has_changes() returns false after a child is published only if all children are unchanged
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
# Change both children
child = self.store.get_item(locations['child'])
child_sibling = self.store.get_item(locations['child_sibling'])
child.display_name = 'Changed Display Name'
child_sibling.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
self.store.update_item(child_sibling, user_id=self.user_id)
# Verify that ancestors have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Publish one child
self.store.publish(locations['child_sibling'], self.user_id)
# Verify that ancestors still have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Publish the other child
self.store.publish(locations['child'], self.user_id)
# Verify that ancestors now have no changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_add_remove_child(self, default_ms):
"""
Tests that has_changes() returns true for the parent when a child with changes is added
and false when that child is removed.
"""
locations = self.setup_has_changes(default_ms)
# Test that the ancestors don't have changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
# Create a new child and attach it to parent
self.store.create_child(
self.user_id,
locations['parent'],
'vertical',
block_id='new_child',
)
# Verify that the ancestors now have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Remove the child from the parent
parent = self.store.get_item(locations['parent'])
parent.children = [locations['child'], locations['child_sibling']]
self.store.update_item(parent, user_id=self.user_id)
# Verify that ancestors now have no changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_non_direct_only_children(self, default_ms):
"""
Tests that has_changes() returns true after editing the child of a vertical (both not direct only categories).
"""
self.initdb(default_ms)
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
child = self.store.create_child(
self.user_id,
parent.location,
'html',
block_id='child',
)
self.store.publish(parent.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(parent.location))
self.assertFalse(self._has_changes(child.location))
# Change the child
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
# Verify that both parent and child have changes
self.assertTrue(self._has_changes(parent.location))
self.assertTrue(self._has_changes(child.location))
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(ModuleStoreEnum.Branch.draft_preferred, ModuleStoreEnum.Branch.published_only)
))
@ddt.unpack
def test_has_changes_missing_child(self, default_ms, default_branch):
"""
Tests that has_changes() does not throw an exception when a child doesn't exist.
"""
self.initdb(default_ms)
with self.store.branch_setting(default_branch, self.course.id):
# Create the parent and point it to a fake child
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
parent.children += [self.course.id.make_usage_key('vertical', 'does_not_exist')]
parent = self.store.update_item(parent, self.user_id)
# Check the parent for changes should return True and not throw an exception
self.assertTrue(self.store.has_changes(parent))
# Draft
# Find: find parents (definition.children query), get parent, get course (fill in run?),
# find parents of the parent (course), get inheritance items,
# get item (to delete subtree), get inheritance again.
# Sends: delete item, update parent
# Split
# Find: active_versions, 2 structures (published & draft), definition (unnecessary)
# Sends: updated draft and published structures and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 7, 2), (ModuleStoreEnum.Type.split, 3, 3))
@ddt.unpack
def test_delete_item(self, default_ms, max_find, max_send):
"""
Delete should reject on r/o db and work on r/w one
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
# r/o try deleting the chapter (is here to ensure it can't be deleted)
with self.assertRaises(NotImplementedError):
self.store.delete_item(self.xml_chapter_location, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.writable_chapter_location.course_key):
with check_mongo_calls(max_find, max_send):
self.store.delete_item(self.writable_chapter_location, self.user_id)
# verify it's gone
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location)
# verify it's gone from published too
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location, revision=ModuleStoreEnum.RevisionOption.published_only)
# Draft:
# queries: find parent (definition.children), count versions of item, get parent, count grandparents,
# inheritance items, draft item, draft child, inheritance
# sends: delete draft vertical and update parent
# Split:
# queries: active_versions, draft and published structures, definition (unnecessary)
# sends: update published (why?), draft, and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 9, 2), (ModuleStoreEnum.Type.split, 4, 3))
@ddt.unpack
def test_delete_private_vertical(self, default_ms, max_find, max_send):
"""
        Because old mongo treated verticals as the first layer that could be draft, it has
        some interesting behavioral properties that this deletion test exercises.
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID],
'vertical', block_id='private'
)
private_leaf = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, private_vert.location, 'html', block_id='private_leaf'
)
# verify pre delete state (just to verify that the test is valid)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(private_leaf.location.version_guid)
else:
vert_loc = private_vert.location
self.assertTrue(self.store.has_item(vert_loc))
self.assertTrue(self.store.has_item(private_leaf.location))
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
self.assertIn(vert_loc, course.children)
# delete the vertical and ensure the course no longer points to it
with check_mongo_calls(max_find, max_send):
self.store.delete_item(vert_loc, self.user_id)
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(course.location.version_guid)
leaf_loc = private_leaf.location.for_version(course.location.version_guid)
else:
vert_loc = private_vert.location
leaf_loc = private_leaf.location
self.assertFalse(self.store.has_item(vert_loc))
self.assertFalse(self.store.has_item(leaf_loc))
self.assertNotIn(vert_loc, course.children)
# Draft:
# find: find parent (definition.children) 2x, find draft item, get inheritance items
# send: one delete query for specific item
# Split:
# find: active_version & structure (cached)
# send: update structure and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 4, 1), (ModuleStoreEnum.Type.split, 2, 2))
@ddt.unpack
def test_delete_draft_vertical(self, default_ms, max_find, max_send):
"""
Test deleting a draft vertical which has a published version.
"""
self.initdb(default_ms)
# reproduce bug STUD-1965
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID], 'vertical', block_id='publish'
)
private_leaf = self.store.create_child(
self.user_id, private_vert.location, 'html', block_id='bug_leaf'
)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.delete_item(
private_leaf.location,
self.user_id,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
self.store.publish(private_vert.location, self.user_id)
private_leaf.display_name = 'change me'
private_leaf = self.store.update_item(private_leaf, self.user_id)
# test succeeds if delete succeeds w/o error
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
with check_mongo_calls(max_find, max_send):
self.store.delete_item(private_leaf.location, self.user_id)
# Draft:
# 1) find all courses (wildcard),
# 2) get each course 1 at a time (1 course),
# 3) wildcard split if it has any (1) but it doesn't
# Split:
# 1) wildcard split search,
# 2-4) active_versions, structure, definition (s/b lazy; so, unnecessary)
# 5) wildcard draft mongo which has none
@ddt.data((ModuleStoreEnum.Type.mongo, 3, 0), (ModuleStoreEnum.Type.split, 5, 0))
@ddt.unpack
def test_get_courses(self, default_ms, max_find, max_send):
self.initdb(default_ms)
# we should have 3 total courses across all stores
with check_mongo_calls(max_find, max_send):
courses = self.store.get_courses()
course_ids = [course.location for course in courses]
self.assertEqual(len(courses), 3, "Not 3 courses: {}".format(course_ids))
self.assertIn(self.course_locations[self.MONGO_COURSEID], course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID1], course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID2], course_ids)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
draft_courses = self.store.get_courses(remove_branch=True)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
published_courses = self.store.get_courses(remove_branch=True)
        self.assertEqual([c.id for c in draft_courses], [c.id for c in published_courses])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_child_detached_tabs(self, default_ms):
"""
test 'create_child' method with a detached category ('static_tab')
to check that new static tab is not a direct child of the course
"""
self.initdb(default_ms)
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(mongo_course.children), 1)
# create a static tab of the course
self.store.create_child(
self.user_id,
self.course.location,
'static_tab'
)
# now check that the course has same number of children
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(mongo_course.children), 1)
def test_xml_get_courses(self):
"""
Test that the xml modulestore only loaded the courses from the maps.
"""
self.initdb(ModuleStoreEnum.Type.mongo)
xml_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml) # pylint: disable=protected-access
courses = xml_store.get_courses()
self.assertEqual(len(courses), 2)
course_ids = [course.id for course in courses]
self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, course_ids)
# this course is in the directory from which we loaded courses but not in the map
self.assertNotIn("edX/toy/TT_2012_Fall", course_ids)
def test_xml_no_write(self):
"""
Test that the xml modulestore doesn't allow write ops.
"""
self.initdb(ModuleStoreEnum.Type.mongo)
xml_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml) # pylint: disable=protected-access
# the important thing is not which exception it raises but that it raises an exception
with self.assertRaises(AttributeError):
xml_store.create_course("org", "course", "run", self.user_id)
# draft is 2: find out which ms owns course, get item
# split: active_versions, structure, definition (to load course wiki string)
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 0), (ModuleStoreEnum.Type.split, 3, 0))
@ddt.unpack
def test_get_course(self, default_ms, max_find, max_send):
"""
        This test exists for performance comparison, not functionality. It measures
        the cost of getting an item whose scope.content fields are accessed.
"""
self.initdb(default_ms)
with check_mongo_calls(max_find, max_send):
course = self.store.get_item(self.course_locations[self.MONGO_COURSEID])
self.assertEqual(course.id, self.course_locations[self.MONGO_COURSEID].course_key)
course = self.store.get_item(self.course_locations[self.XML_COURSEID1])
self.assertEqual(course.id, self.course_locations[self.XML_COURSEID1].course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_library(self, default_ms):
"""
Test that create_library and get_library work regardless of the default modulestore.
Other tests of MixedModulestore support are in test_libraries.py but this one must
be done here so we can test the configuration where Draft/old is the first modulestore.
"""
self.initdb(default_ms)
with self.store.default_store(ModuleStoreEnum.Type.split): # The CMS also wraps create_library like this
library = self.store.create_library("org", "lib", self.user_id, {"display_name": "Test Library"})
library_key = library.location.library_key
self.assertIsInstance(library_key, LibraryLocator)
# Now load with get_library and make sure it works:
library = self.store.get_library(library_key)
self.assertEqual(library.location.library_key, library_key)
# Clear the mappings so we can test get_library code path without mapping set:
self.store.mappings.clear()
library = self.store.get_library(library_key)
self.assertEqual(library.location.library_key, library_key)
# notice this doesn't test getting a public item via draft_preferred which draft would have 2 hits (split
# still only 2)
# Draft: get_parent
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_parent_locations(self, default_ms, max_find, max_send):
"""
Test a simple get parent for a direct only category (i.e, always published)
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with check_mongo_calls(max_find, max_send):
parent = self.store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
parent = self.store.get_parent_location(self.xml_chapter_location)
self.assertEqual(parent, self.course_locations[self.XML_COURSEID1])
def verify_get_parent_locations_results(self, expected_results):
"""
Verifies the results of calling get_parent_locations matches expected_results.
"""
for child_location, parent_location, revision in expected_results:
self.assertEqual(
parent_location,
self.store.get_parent_location(child_location, revision=revision)
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_parent_locations_moved_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.course = self.store.publish(self.course.location, self.user_id)
with self.store.bulk_operations(self.course.id):
# make drafts of verticals
self.store.convert_to_draft(self.vertical_x1a, self.user_id)
self.store.convert_to_draft(self.vertical_y1a, self.user_id)
# move child problem_x1a_1 to vertical_y1a
child_to_move_location = self.problem_x1a_1
new_parent_location = self.vertical_y1a
old_parent_location = self.vertical_x1a
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
old_parent = self.store.get_item(child_to_move_location).get_parent()
self.assertEqual(old_parent_location, old_parent.location)
child_to_move_contextualized = child_to_move_location.map_into_course(old_parent.location.course_key)
old_parent.children.remove(child_to_move_contextualized)
self.store.update_item(old_parent, self.user_id)
new_parent = self.store.get_item(new_parent_location)
new_parent.children.append(child_to_move_location)
self.store.update_item(new_parent, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
self.assertEqual(new_parent_location, self.store.get_item(child_to_move_location).get_parent().location)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertEqual(old_parent_location, self.store.get_item(child_to_move_location).get_parent().location)
old_parent_published_location = old_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, old_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
new_parent_published_location = new_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, new_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_parent_locations_deleted_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.store.publish(self.course.location, self.user_id)
# make draft of vertical
self.store.convert_to_draft(self.vertical_y1a, self.user_id)
# delete child problem_y1a_1
child_to_delete_location = self.problem_y1a_1
old_parent_location = self.vertical_y1a
self.store.delete_item(child_to_delete_location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, old_parent_location, None),
# Note: The following could be an unexpected result, but we want to avoid an extra database call
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, None, None),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_parent_location_draft(self, default_ms):
"""
Test that "get_parent_location" method returns first published parent
for a draft component, if it has many possible parents (including
draft parents).
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
# add another parent (unit) "vertical_x1b" for problem "problem_x1a_1"
mongo_store.collection.update(
self.vertical_x1b.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# convert first parent (unit) "vertical_x1a" of problem "problem_x1a_1" to draft
self.store.convert_to_draft(self.vertical_x1a, self.user_id)
item = self.store.get_item(self.vertical_x1a)
self.assertTrue(self.store.has_published_version(item))
# now problem "problem_x1a_1" has 3 parents [vertical_x1a (draft),
# vertical_x1a (published), vertical_x1b (published)]
# check that "get_parent_location" method of draft branch returns first
# published parent "vertical_x1a" without raising "AssertionError" for
# problem location revision
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# Draft:
# Problem path:
# 1. Get problem
# 2-6. get parent and rest of ancestors up to course
# 7-8. get sequential, compute inheritance
# 8-9. get vertical, compute inheritance
# 10-11. get other vertical_x1b (why?) and compute inheritance
# Split: active_versions & structure
@ddt.data((ModuleStoreEnum.Type.mongo, [12, 3], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_path_to_location(self, default_ms, num_finds, num_sends):
"""
Make sure that path_to_location works
"""
self.initdb(default_ms)
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self._create_block_hierarchy()
should_work = (
(self.problem_x1a_2,
(course_key, u"Chapter_x", u"Sequential_x1", u'Vertical_x1a', '1', self.problem_x1a_2)),
(self.chapter_x,
(course_key, "Chapter_x", None, None, None, self.chapter_x)),
)
for location, expected in should_work:
# each iteration has different find count, pop this iter's find count
with check_mongo_calls(num_finds.pop(0), num_sends):
path = path_to_location(self.store, location)
self.assertEqual(path, expected)
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with self.assertRaises(ItemNotFoundError):
path_to_location(self.store, location)
# Orphaned items should not be found.
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(
self.user_id,
orphan.course_key,
orphan.block_type,
block_id=orphan.block_id
)
with self.assertRaises(NoPathToItem):
path_to_location(self.store, orphan)
def test_xml_path_to_location(self):
"""
Make sure that path_to_location works: should be passed a modulestore
with the toy and simple courses loaded.
"""
# only needs course_locations set
self.initdb(ModuleStoreEnum.Type.mongo)
course_key = self.course_locations[self.XML_COURSEID1].course_key
video_key = course_key.make_usage_key('video', 'Welcome')
chapter_key = course_key.make_usage_key('chapter', 'Overview')
should_work = (
(video_key,
(course_key, "Overview", "Welcome", None, None, video_key)),
(chapter_key,
(course_key, "Overview", None, None, None, chapter_key)),
)
for location, expected in should_work:
self.assertEqual(path_to_location(self.store, location), expected)
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with self.assertRaises(ItemNotFoundError):
path_to_location(self.store, location)
def test_navigation_index(self):
"""
Make sure that navigation_index correctly parses the various position values that we might get from calls to
        path_to_location.
"""
self.assertEqual(1, navigation_index("1"))
self.assertEqual(10, navigation_index("10"))
self.assertEqual(None, navigation_index(None))
self.assertEqual(1, navigation_index("1_2"))
self.assertEqual(5, navigation_index("5_2"))
self.assertEqual(7, navigation_index("7_3_5_6_"))
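        # A rough sketch of the parsing these asserts imply (the real
        # implementation may differ): take the integer before the first
        # underscore, or None for a None position, e.g.
        #   int("7_3_5_6_".split('_', 1)[0]) == 7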
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_root_draft(self, default_ms):
"""
Test calling revert_to_published on draft vertical.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
vertical = self.store.get_item(self.vertical_x1a)
vertical_children_num = len(vertical.children)
self.store.publish(self.course.location, self.user_id)
self.assertFalse(self._has_changes(self.vertical_x1a))
# delete leaf problem (will make parent vertical a draft)
self.store.delete_item(self.problem_x1a_1, self.user_id)
self.assertTrue(self._has_changes(self.vertical_x1a))
draft_parent = self.store.get_item(self.vertical_x1a)
self.assertEqual(vertical_children_num - 1, len(draft_parent.children))
published_parent = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertEqual(vertical_children_num, len(published_parent.children))
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_parent = self.store.get_item(self.vertical_x1a)
        self.assertEqual(vertical_children_num, len(reverted_parent.children))
self.assertBlocksEqualByFields(reverted_parent, published_parent)
self.assertFalse(self._has_changes(self.vertical_x1a))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_root_published(self, default_ms):
"""
Test calling revert_to_published on a published vertical with a draft child.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
problem = self.store.get_item(self.problem_x1a_1)
orig_display_name = problem.display_name
# Change display name of problem and update just it (so parent remains published)
problem.display_name = "updated before calling revert"
self.store.update_item(problem, self.user_id)
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_problem = self.store.get_item(self.problem_x1a_1)
self.assertEqual(orig_display_name, reverted_problem.display_name)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_no_draft(self, default_ms):
"""
        Test that calling revert_to_published on a vertical with no draft content does nothing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
orig_vertical = self.store.get_item(self.vertical_x1a)
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_vertical = self.store.get_item(self.vertical_x1a)
self.assertBlocksEqualByFields(orig_vertical, reverted_vertical)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_no_published(self, default_ms):
"""
        Test that calling revert_to_published on a vertical with no published version raises an error.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with self.assertRaises(InvalidVersionError):
self.store.revert_to_published(self.vertical_x1a, self.user_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_direct_only(self, default_ms):
"""
        Test that calling revert_to_published on a direct-only item is a no-op.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
num_children = len(self.store.get_item(self.sequential_x1).children)
self.store.revert_to_published(self.sequential_x1, self.user_id)
reverted_parent = self.store.get_item(self.sequential_x1)
# It does not discard the child vertical, even though that child is a draft (with no published version)
self.assertEqual(num_children, len(reverted_parent.children))
# Draft: get all items which can be or should have parents
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_orphans(self, default_ms, max_find, max_send):
"""
Test finding orphans.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
# orphans
orphan_locations = [
course_id.make_usage_key('chapter', 'OrphanChapter'),
course_id.make_usage_key('vertical', 'OrphanVertical'),
course_id.make_usage_key('problem', 'OrphanProblem'),
course_id.make_usage_key('html', 'OrphanHTML'),
]
# detached items (not considered as orphans)
detached_locations = [
course_id.make_usage_key('static_tab', 'StaticTab'),
course_id.make_usage_key('course_info', 'updates'),
]
for location in orphan_locations + detached_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
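        # An orphan is a block that no other block lists among its children;
        # detached categories (assumed to include static_tab and course_info)
        # are never parented by design, so get_orphans should skip them.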
with check_mongo_calls(max_find, max_send):
found_orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertItemsEqual(found_orphans, orphan_locations)
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_non_orphan_parents(self, default_ms):
"""
        Test finding non-orphan parents from many possible parents.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
# test that problem "problem_x1a_1" has only one published parent
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# add some published orphans
orphan_sequential = course_id.make_usage_key('sequential', 'OrphanSequential')
orphan_vertical = course_id.make_usage_key('vertical', 'OrphanVertical')
orphan_locations = [orphan_sequential, orphan_vertical]
for location in orphan_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
self.store.publish(location, self.user_id)
found_orphans = mongo_store.get_orphans(course_id)
self.assertEqual(set(found_orphans), set(orphan_locations))
self.assertEqual(len(set(found_orphans)), 2)
        # add orphan vertical and sequential as additional parents of problem "problem_x1a_1"
mongo_store.collection.update(
orphan_sequential.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
mongo_store.collection.update(
orphan_vertical.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# test that "get_parent_location" method of published branch still returns the correct non-orphan parent for
# problem "problem_x1a_1" since the two other parents are orphans
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
        # now add a valid, published sequential as another parent of the problem
mongo_store.collection.update(
self.sequential_x1.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
        # now check that "get_parent_location" method of published branch raises "ReferentialIntegrityError" for
        # problem "problem_x1a_1" since it now has 2 valid published parents
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
self.assertTrue(self.store.has_item(self.problem_x1a_1))
with self.assertRaises(ReferentialIntegrityError):
self.store.get_parent_location(self.problem_x1a_1)
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_create_item_from_parent_location(self, default_ms):
"""
Test a code path missed by the above: passing an old-style location as parent but no
new location for the child
"""
self.initdb(default_ms)
self.store.create_child(
self.user_id,
self.course_locations[self.MONGO_COURSEID],
'problem',
block_id='orphan'
)
orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(orphans), 0, "unexpected orphans: {}".format(orphans))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_item_populates_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
self.assertEqual(self.user_id, block.edited_by)
self.assertGreater(datetime.datetime.now(UTC), block.edited_on)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_item_populates_subtree_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
self.assertEqual(self.user_id, block.subtree_edited_by)
self.assertGreater(datetime.datetime.now(UTC), block.subtree_edited_on)
# Draft: wildcard search of draft and split
# Split: wildcard search of draft and split
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_courses_for_wiki(self, default_ms, max_find, max_send):
"""
Test the get_courses_for_wiki method
"""
self.initdb(default_ms)
# Test XML wikis
wiki_courses = self.store.get_courses_for_wiki('toy')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, wiki_courses)
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, wiki_courses)
# Test Mongo wiki
with check_mongo_calls(max_find, max_send):
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None), # Branch agnostic
wiki_courses
)
self.assertEqual(len(self.store.get_courses_for_wiki('edX.simple.2012_Fall')), 0)
self.assertEqual(len(self.store.get_courses_for_wiki('no_such_wiki')), 0)
# Draft:
# Find: find vertical, find children
# Sends:
# 1. delete all of the published nodes in subtree
# 2. insert vertical as published (deleted in step 1) w/ the deleted problems as children
# 3-6. insert the 3 problems and 1 html as published
# Split: active_versions, 2 structures (pre & post published?)
# Sends:
# - insert structure
# - write index entry
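    # check_mongo_calls(max_find, max_send) is assumed to count the find and
    # send (write) operations issued against Mongo inside its block and compare
    # them against these expectations.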
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 6), (ModuleStoreEnum.Type.split, 3, 2))
@ddt.unpack
def test_unpublish(self, default_ms, max_find, max_send):
"""
Test calling unpublish
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
self._create_block_hierarchy()
# publish
self.store.publish(self.course.location, self.user_id)
published_xblock = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertIsNotNone(published_xblock)
# unpublish
with check_mongo_calls(max_find, max_send):
self.store.unpublish(self.vertical_x1a, self.user_id)
with self.assertRaises(ItemNotFoundError):
self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
# make sure draft version still exists
draft_xblock = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.draft_only
)
self.assertIsNotNone(draft_xblock)
# Draft: specific query for revision None
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_has_published_version(self, default_ms, max_find, max_send):
"""
Test the has_published_version method
"""
self.initdb(default_ms)
self._create_block_hierarchy()
# start off as Private
item = self.store.create_child(self.user_id, self.writable_chapter_location, 'problem', 'test_compute_publish_state')
item_location = item.location
with check_mongo_calls(max_find, max_send):
self.assertFalse(self.store.has_published_version(item))
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Public -> Private
self.store.unpublish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertFalse(self.store.has_published_version(item))
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Public -> Draft with NO changes
self.store.convert_to_draft(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Draft WITH changes
item.display_name = 'new name'
item = self.store.update_item(item, self.user_id)
self.assertTrue(self.store.has_changes(item))
self.assertTrue(self.store.has_published_version(item))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_edit_info_ancestors(self, default_ms):
"""
Tests that edited_on, edited_by, subtree_edited_on, and subtree_edited_by are set correctly during update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
def check_node(location_key, after, before, edited_by, subtree_after, subtree_before, subtree_by):
"""
Checks that the node given by location_key matches the given edit_info constraints.
"""
node = self.store.get_item(location_key)
if after:
self.assertLess(after, node.edited_on)
self.assertLess(node.edited_on, before)
self.assertEqual(node.edited_by, edited_by)
if subtree_after:
self.assertLess(subtree_after, node.subtree_edited_on)
self.assertLess(node.subtree_edited_on, subtree_before)
self.assertEqual(node.subtree_edited_by, subtree_by)
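        # Passing None for `after`/`subtree_after` skips only the lower-bound
        # check, so callers can still assert "edited before <time> by <user>".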
with self.store.bulk_operations(test_course.id):
# Create a dummy vertical & html to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
block_id='test_vertical'
)
child = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html'
)
sibling = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html_no_change'
)
after_create = datetime.datetime.now(UTC)
# Verify that all nodes were last edited in the past by create_user
for block in [component, child, sibling]:
check_node(block.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the component, then check that there now are changes
component.display_name = 'Changed Display Name'
editing_user = self.user_id - 2
with self.store.bulk_operations(test_course.id): # TNL-764 bulk ops disabled ancestor updates
component = self.store.update_item(component, editing_user)
after_edit = datetime.datetime.now(UTC)
check_node(component.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# but child didn't change
check_node(child.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the child
child = self.store.get_item(child.location)
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=editing_user)
after_edit = datetime.datetime.now(UTC)
        # Verify that child was last edited between after_create and after_edit by editing_user
check_node(child.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# Verify that ancestors edit info is unchanged, but their subtree edit info matches child
check_node(test_course.location, None, after_create, self.user_id, after_create, after_edit, editing_user)
# Verify that others have unchanged edit info
check_node(sibling.location, None, after_create, self.user_id, None, after_create, self.user_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_edit_info(self, default_ms):
"""
Tests that edited_on and edited_by are set correctly during an update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current edit time and verify that user created the component
self.assertEqual(component.edited_by, self.user_id)
old_edited_on = component.edited_on
edit_user = self.user_id - 2
# Change the component
component.display_name = 'Changed'
self.store.update_item(component, edit_user)
updated_component = self.store.get_item(component.location)
        # Verify the ordering of edit times and that edit_user made the edit
self.assertLess(old_edited_on, updated_component.edited_on)
self.assertEqual(updated_component.edited_by, edit_user)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_published_info(self, default_ms):
"""
Tests that published_on and published_by are set correctly
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
publish_user = 456
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current time, then publish
old_time = datetime.datetime.now(UTC)
self.store.publish(component.location, publish_user)
updated_component = self.store.get_item(component.location)
# Verify the time order and that publish_user caused publication
self.assertLessEqual(old_time, updated_component.published_on)
self.assertEqual(updated_component.published_by, publish_user)
# Verify that changing the item doesn't unset the published info
updated_component.display_name = 'changed'
self.store.update_item(updated_component, self.user_id)
updated_component = self.store.get_item(updated_component.location)
self.assertLessEqual(old_time, updated_component.published_on)
self.assertEqual(updated_component.published_by, publish_user)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_auto_publish(self, default_ms):
"""
Test that the correct things have been published automatically
Assumptions:
* we auto-publish courses, chapters, sequentials
* we don't auto-publish problems
"""
self.initdb(default_ms)
# test create_course to make sure we are autopublishing
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
self.assertTrue(self.store.has_published_version(test_course))
test_course_key = test_course.id
# test create_item of direct-only category to make sure we are autopublishing
chapter = self.store.create_child(self.user_id, test_course.location, 'chapter', 'Overview')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertIn(
chapter.location,
self.store.get_item(test_course.location).children,
)
self.assertTrue(self.store.has_published_version(chapter))
chapter_location = chapter.location
# test create_child of direct-only category to make sure we are autopublishing
sequential = self.store.create_child(self.user_id, chapter_location, 'sequential', 'Sequence')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertIn(
sequential.location,
self.store.get_item(chapter_location).children,
)
self.assertTrue(self.store.has_published_version(sequential))
# test update_item of direct-only category to make sure we are autopublishing
sequential.display_name = 'sequential1'
sequential = self.store.update_item(sequential, self.user_id)
self.assertTrue(self.store.has_published_version(sequential))
# test delete_item of direct-only category to make sure we are autopublishing
self.store.delete_item(sequential.location, self.user_id, revision=ModuleStoreEnum.RevisionOption.all)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertNotIn(
sequential.location,
self.store.get_item(chapter_location).children,
)
chapter = self.store.get_item(chapter.location.for_branch(None))
self.assertTrue(self.store.has_published_version(chapter))
# test create_child of NOT direct-only category to make sure we aren't autopublishing
problem_child = self.store.create_child(self.user_id, chapter_location, 'problem', 'Problem_Child')
self.assertFalse(self.store.has_published_version(problem_child))
# test create_item of NOT direct-only category to make sure we aren't autopublishing
problem_item = self.store.create_item(self.user_id, test_course_key, 'problem', 'Problem_Item')
self.assertFalse(self.store.has_published_version(problem_item))
# test update_item of NOT direct-only category to make sure we aren't autopublishing
problem_item.display_name = 'Problem_Item1'
problem_item = self.store.update_item(problem_item, self.user_id)
self.assertFalse(self.store.has_published_version(problem_item))
# test delete_item of NOT direct-only category to make sure we aren't autopublishing
self.store.delete_item(problem_child.location, self.user_id)
chapter = self.store.get_item(chapter.location.for_branch(None))
self.assertTrue(self.store.has_published_version(chapter))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_courses_for_wiki_shared(self, default_ms):
"""
Test two courses sharing the same wiki
"""
self.initdb(default_ms)
# verify initial state - initially, we should have a wiki for the Mongo course
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None), # Branch agnostic
wiki_courses
)
# set Mongo course to share the wiki with simple course
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'simple'
self.store.update_item(mongo_course, self.user_id)
# now mongo_course should not be retrievable with old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertEqual(len(wiki_courses), 0)
# but there should be two courses with wiki_slug 'simple'
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 2)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, wiki_courses)
# configure mongo course to use unique wiki_slug.
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'MITx.999.2013_Spring'
self.store.update_item(mongo_course, self.user_id)
# it should be retrievable with its new wiki_slug
wiki_courses = self.store.get_courses_for_wiki('MITx.999.2013_Spring')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
        # and NOT retrievable with its old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 1)
self.assertNotIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
self.assertIn(
self.course_locations[self.XML_COURSEID2].course_key,
wiki_courses
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_branch_setting(self, default_ms):
"""
Test the branch_setting context manager
"""
self.initdb(default_ms)
self._create_block_hierarchy()
problem_location = self.problem_x1a_1.for_branch(None)
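        # for_branch(None) is assumed to strip any branch from the usage key so
        # that the lookups below honor whichever branch_setting is active.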
problem_original_name = 'Problem_x1a_1'
course_key = problem_location.course_key
problem_new_name = 'New Problem Name'
def assertNumProblems(display_name, expected_number):
"""
Asserts the number of problems with the given display name is the given expected number.
"""
self.assertEquals(
len(self.store.get_items(course_key.for_branch(None), settings={'display_name': display_name})),
expected_number
)
def assertProblemNameEquals(expected_display_name):
"""
Asserts the display_name of the xblock at problem_location matches the given expected value.
"""
# check the display_name of the problem
problem = self.store.get_item(problem_location)
self.assertEquals(problem.display_name, expected_display_name)
# there should be only 1 problem with the expected_display_name
assertNumProblems(expected_display_name, 1)
# verify Draft problem
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
self.assertTrue(self.store.has_item(problem_location))
assertProblemNameEquals(problem_original_name)
# verify Published problem doesn't exist
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self.assertFalse(self.store.has_item(problem_location))
with self.assertRaises(ItemNotFoundError):
self.store.get_item(problem_location)
# PUBLISH the problem
self.store.publish(self.vertical_x1a, self.user_id)
self.store.publish(problem_location, self.user_id)
# verify Published problem
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self.assertTrue(self.store.has_item(problem_location))
assertProblemNameEquals(problem_original_name)
# verify Draft-preferred
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_original_name)
# EDIT name
problem = self.store.get_item(problem_location)
problem.display_name = problem_new_name
self.store.update_item(problem, self.user_id)
# verify Draft problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_new_name)
# verify Published problem still has old name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_original_name)
# there should be no published problems with the new name
assertNumProblems(problem_new_name, 0)
# PUBLISH the problem
self.store.publish(problem_location, self.user_id)
# verify Published problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_new_name)
# there should be no published problems with the old name
assertNumProblems(problem_original_name, 0)
def verify_default_store(self, store_type):
"""
Verifies the default_store property
"""
self.assertEquals(self.store.default_modulestore.get_modulestore_type(), store_type)
# verify internal helper method
store = self.store._get_modulestore_for_courselike() # pylint: disable=protected-access
self.assertEquals(store.get_modulestore_type(), store_type)
# verify store used for creating a course
try:
course = self.store.create_course("org", "course{}".format(uuid4().hex[:5]), "run", self.user_id)
self.assertEquals(course.system.modulestore.get_modulestore_type(), store_type)
except NotImplementedError:
self.assertEquals(store_type, ModuleStoreEnum.Type.xml)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.xml)
def test_default_store(self, default_ms):
"""
Test the default store context manager
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.verify_default_store(default_ms)
def test_default_store_nested(self):
"""
        Test default store context managers nested within one another.
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(ModuleStoreEnum.Type.mongo):
self.verify_default_store(ModuleStoreEnum.Type.mongo)
with self.store.default_store(ModuleStoreEnum.Type.split):
self.verify_default_store(ModuleStoreEnum.Type.split)
with self.store.default_store(ModuleStoreEnum.Type.xml):
self.verify_default_store(ModuleStoreEnum.Type.xml)
self.verify_default_store(ModuleStoreEnum.Type.split)
self.verify_default_store(ModuleStoreEnum.Type.mongo)
def test_default_store_fake(self):
"""
Test the default store context manager, asking for a fake store
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
fake_store = "fake"
with self.assertRaisesRegexp(Exception, "Cannot find store of type {}".format(fake_store)):
with self.store.default_store(fake_store):
pass # pragma: no cover
def save_asset(self, asset_key):
"""
Load and save the given file. (taken from test_contentstore)
"""
with open("{}/static/{}".format(DATA_DIR, asset_key.block_id), "rb") as f:
content = StaticContent(
asset_key, "Funky Pix", mimetypes.guess_type(asset_key.block_id)[0], f.read(),
)
self.store.contentstore.save(content)
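        # StaticContent's positional args are assumed to be
        # (asset_key, display_name, mime_type, data); "Funky Pix" is just a
        # human-readable name for the asset.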
@ddt.data(
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.mongo],
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split],
[ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.split]
)
@ddt.unpack
def test_clone_course(self, source_modulestore, destination_modulestore):
"""
Test clone course
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(source_modulestore):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
self.save_asset(source_course_key.make_asset_key('asset', 'picture1.jpg'))
with self.store.default_store(destination_modulestore):
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(source_course_key, dest_course_id, self.user_id)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(source_modulestore)
dest_store = self.store._get_modulestore_by_type(destination_modulestore)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
def test_clone_xml_split(self):
"""
        Test that XML courses can be cloned into the split modulestore.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={self.XML_COURSEID2: 'xml', })
source_course_key = CourseKey.from_string(self.XML_COURSEID2)
with self.store.default_store(ModuleStoreEnum.Type.split):
dest_course_id = CourseLocator("org.other", "course.other", "run.other")
self.store.clone_course(
source_course_key, dest_course_id, ModuleStoreEnum.UserID.test
)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml)
dest_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.split)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_operations_signal_firing(self, default):
""" Signals should be fired right before bulk_operations() exits. """
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
course_key = course.id
def _clear_bulk_ops_record(course_key): # pylint: disable=unused-argument
"""
Check if the signal has been fired.
The course_published signal fires before the _clear_bulk_ops_record.
"""
signal_handler.send.assert_called_with('course_published', course_key=course.id)
with patch.object(
self.store.thread_cache.default_store, '_clear_bulk_ops_record', wraps=_clear_bulk_ops_record
) as mock_clear_bulk_ops_record:
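                # patch.object with wraps= swaps in a mock that delegates to the
                # local _clear_bulk_ops_record above, so the test can assert that
                # course_published has already fired by the time the bulk-ops
                # record is cleared (ordering, not just call counts).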
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_not_called()
self.assertEqual(mock_clear_bulk_ops_record.call_count, 1)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test non-draftable block types. The block should be published with every change.
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
signal_handler.reset_mock()
block = self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
block.display_name = block_type
self.store.update_item(block, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.publish(block.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_rerun_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test course re-runs
signal_handler.reset_mock()
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(course_key, dest_course_id, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=dest_course_id)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_import_firing(self, default, _from_json):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Test course imports
# Note: The signal is fired once when the course is created and
# a second time after the actual data import.
import_course_from_xml(
self.store, self.user_id, DATA_DIR, ['toy'], load_error_modules=False,
static_content_store=contentstore,
create_if_not_present=True,
)
signal_handler.send.assert_has_calls([
call('pre_publish', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('course_published', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('pre_publish', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('course_published', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
signal_handler.reset_mock()
section = self.store.create_item(self.user_id, course.id, 'chapter')
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# 'units' and 'blocks' are draftable types
signal_handler.reset_mock()
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.reset_mock()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.unpublish(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.delete_item(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
                # Test non-draftable block types. No signals should be received until the bulk operation completes.
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
block = self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_not_called()
block.display_name = block_type
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
self.store.publish(block.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
section = self.store.create_item(self.user_id, course_key, 'chapter')
signal_handler.send.assert_not_called()
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
signal_handler.send.assert_not_called()
# 'units' and 'blocks' are draftable types
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
self.store.unpublish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
self.store.delete_item(unit.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# Test editing draftable block type without publish
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
signal_handler.send.assert_not_called()
unit.display_name = "Change this unit"
self.store.update_item(unit, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_not_called()
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_deleted_signal(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Create a course
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
course_key = course.id
# Delete the course
course = self.store.delete_course(course_key, self.user_id)
# Verify that the signal was emitted
signal_handler.send.assert_called_with('course_deleted', course_key=course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_published_item_orphans(self, default_store):
"""
        Tests that deleting a published item doesn't create any orphans in the course.
"""
self.initdb(default_store)
course_locator = self.course.id
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
problem = self.store.create_child(
self.user_id, vertical.location, 'problem', block_id='problem'
)
self.store.publish(chapter.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(problem.location))
# No orphans in course
course_orphans = self.store.get_orphans(course_locator)
self.assertEqual(len(course_orphans), 0)
self.store.delete_item(vertical.location, self.user_id)
# No orphans in course after delete, except
# in old mongo, which still creates orphans
course_orphans = self.store.get_orphans(course_locator)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_orphans), 1)
else:
self.assertEqual(len(course_orphans), 0)
course_locator_publish = course_locator.for_branch(ModuleStoreEnum.BranchName.published)
        # No published orphans after delete, except
# in old mongo, which still creates orphans
course_publish_orphans = self.store.get_orphans(course_locator_publish)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_publish_orphans), 1)
else:
self.assertEqual(len(course_publish_orphans), 0)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_draft_item_orphans(self, default_store):
"""
        Tests that deleting a draft item creates no orphans in the course.
"""
self.initdb(default_store)
course_locator = self.course.id
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
problem = self.store.create_child(
self.user_id, vertical.location, 'problem', block_id='problem'
)
self.store.publish(chapter.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(problem.location))
# No orphans in course
course_orphans = self.store.get_orphans(course_locator)
self.assertEqual(len(course_orphans), 0)
problem.display_name = 'changed'
problem = self.store.update_item(problem, self.user_id)
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(problem.location))
self.store.delete_item(vertical.location, self.user_id)
# No orphans in course after delete, except
# in old mongo, which still creates them
course_orphans = self.store.get_orphans(course_locator)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_orphans), 1)
else:
self.assertEqual(len(course_orphans), 0)
course_locator_publish = course_locator.for_branch(ModuleStoreEnum.BranchName.published)
# No published orphans after delete, except
# in old mongo, which still creates them
course_publish_orphans = self.store.get_orphans(course_locator_publish)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_publish_orphans), 1)
else:
self.assertEqual(len(course_publish_orphans), 0)
@ddt.ddt
@attr('mongo')
class TestPublishOverExportImport(CommonMixedModuleStoreSetup):
"""
Tests which publish (or don't publish) items - and then export/import the course,
checking the state of the imported items.
"""
def setUp(self):
"""
Set up the database for testing
"""
super(TestPublishOverExportImport, self).setUp()
self.user_id = ModuleStoreEnum.UserID.test
self.export_dir = mkdtemp()
self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
def _export_import_course_round_trip(self, modulestore, contentstore, source_course_key, export_dir):
"""
Export the course from a modulestore and then re-import the course.
"""
top_level_export_dir = 'exported_source_course'
export_course_to_xml(
modulestore,
contentstore,
source_course_key,
export_dir,
top_level_export_dir,
)
import_course_from_xml(
modulestore,
'test_user',
export_dir,
source_dirs=[top_level_export_dir],
static_content_store=contentstore,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
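        # Round trip: export writes OLX under export_dir/top_level_export_dir,
        # then the import targets the same course key (target_id) so the
        # re-imported content overwrites the original course in place.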
@contextmanager
def _build_store(self, default_ms):
"""
Perform the modulestore-building and course creation steps for a mixed modulestore test.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(default_ms):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
yield contentstore, source_course_key
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_draft_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that an unpublished unit still has draft changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and don't publish it.
draft_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self._has_changes(draft_xblock.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Verify that the imported block still is a draft, i.e. has changes.
self.assertTrue(self._has_changes(draft_xblock.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_published_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
self.assertFalse(self._has_changes(published_xblock.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that it still is published, i.e. has no changes.
self.assertFalse(self._has_changes(published_xblock.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_changed_published_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit with an unpublished draft remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
self.assertFalse(self._has_changes(published_xblock.location))
updated_display_name = 'Changed Display Name'
component = self.store.get_item(published_xblock.location)
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
self.assertTrue(self.store.has_changes(component))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(published_xblock.location))
# Verify that the changes in the draft vertical still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(published_xblock.location)
self.assertEqual(component.display_name, updated_display_name)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_seq_with_unpublished_vertical_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that published ancestors of an unpublished vertical report changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical - don't publish it!
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Retrieve the published block and make sure it's published.
            # Chapter is published - but the changes in the vertical below mean it "has_changes".
            self.assertTrue(self._has_changes(chapter.location))
            # Sequential is published - but the changes in the vertical below mean it "has_changes".
self.assertTrue(self._has_changes(sequential.location))
# Vertical is unpublished - so it "has_changes".
self.assertTrue(self._has_changes(vertical.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(chapter.location))
self.assertTrue(self._has_changes(sequential.location))
self.assertTrue(self._has_changes(vertical.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_draft_and_published_unit_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published vertical whose child unit has draft edits still has changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
self.assertTrue(self._has_changes(vertical.location))
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Vertical has a new child -and- unit is unpublished. So both have changes.
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(unit.location))
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Publishing the unit separately has no effect on whether it has changes - it's already published.
self.store.publish(unit.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
            # Publish the chapter - it and everything beneath it should then have no changes.
self.store.publish(chapter.location, self.user_id)
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Now make changes to the unit - but don't publish them.
component = self.store.get_item(unit.location)
updated_display_name = 'Changed Display Name'
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
self.assertTrue(self._has_changes(component.location))
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(chapter.location))
self.assertTrue(self._has_changes(sequential.location))
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(unit.location))
# Verify that the changes in the draft unit still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, updated_display_name)
# Verify that the draft changes don't exist in the published unit - it still uses the default name.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, 'Text')
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_published_unit_remains_published_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
self.assertTrue(self._has_changes(vertical.location))
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Now make changes to the unit.
updated_display_name = 'Changed Display Name'
unit.display_name = updated_display_name
unit = self.store.update_item(unit, self.user_id)
self.assertTrue(self._has_changes(unit.location))
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
            # Get the published xblock from the imported course.
            # Verify that the published blocks have no draft changes after the round trip.
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Verify that the published changes exist in the published unit.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, updated_display_name)
| agpl-3.0 | 8,417,558,621,369,573,000 | 44.812416 | 159 | 0.635243 | false |
openshift/openshift-tools | scripts/monitoring/cron-send-os-skydns-checks.py | 12 | 6971 | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Send Openshift Master SkyDNS metric checks to Zagg
Openshift uses SkyDNS to locate services inside the cluster.
Openshift implements SkyDNS a bit differently: normally SkyDNS uses etcd as the
backend where the DNS data is stored, but Openshift uses a special SkyDNS
provider to map Openshift services to IPs. More info can be found by looking at
the source code here:
https://github.com/openshift/origin/blob/master/pkg/dns/serviceresolver.go
In short, each Openshift service has a name, a namespace, and an IP. The custom
provider can return many different variations of these for services, endpoints,
and ports. The variation used within this script is of the form:
<name>.<namespace>.svc.cluster.local
This can be tested manually with dig from the command line, in the form:
$ dig @<nameserver> <name>.<namespace>.svc.cluster.local A
This script assumes that each Openshift service has one and only one IP. This
*could* change, in which case we would need to examine each of the IPs returned
from Openshift and SkyDNS.
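The same lookup can be reproduced from Python with dnspython, which this
script already uses. A minimal sketch (the resolver IP 172.30.0.1 and the
service name below are assumptions, not values from any particular cluster):
    from dns import resolver
    dns_resolver = resolver.Resolver(configure=False)
    dns_resolver.nameservers.append('172.30.0.1')
    answer = dns_resolver.query('kubernetes.default.svc.cluster.local', 'A')
    print answer.rrset[0].address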
'''
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
# Accepting general Exceptions
#pylint: disable=broad-except
# Bot doesn't support openshift_tools libs
#pylint: disable=import-error
import argparse
from dns import resolver
from dns import exception as dns_exception
from openshift_tools.web.openshift_rest_api import OpenshiftRestApi
from openshift_tools.monitoring.metric_sender import MetricSender
import socket
import sys
class OpenshiftSkyDNSZaggClient(object):
""" Checks for the Openshift Master SkyDNS """
def __init__(self):
self.args = None
self.metric_sender = None
self.ora = OpenshiftRestApi()
self.dns_host = ''
self.dns_port = 53
self.openshift_services = []
def run(self):
""" Main function to run the check """
self.parse_args()
self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
self.get_openshift_services()
dns_host = [i for i in self.openshift_services if i['name'] == 'kubernetes' and i['namespace'] == 'default']
if len(dns_host) == 1:
self.dns_host = dns_host[0]['ip']
else:
print "\nUnable to find SKY DNS server."
print "Please run \"oc get services -n default\" to locate kubernetes service"
sys.exit(1)
if self.check_dns_port_alive():
self.do_dns_check()
self.metric_sender.send_metrics()
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Network metric sender')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
self.args = parser.parse_args()
def check_dns_port_alive(self):
""" Verify that the DNS port (TCP 53) is alive """
print "\nPerforming Openshift DNS port check..."
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((self.dns_host, self.dns_port))
s.close()
print "\nOpenshift SkyDNS host: %s, port: %s is OPEN" % (self.dns_host, self.dns_port)
print "================================================\n"
self.metric_sender.add_metric({'openshift.master.skydns.port.open' : 1})
return True
except socket.error, e:
print "\nOpenshift SkyDNS host: %s, port: %s is CLOSED" % (self.dns_host, self.dns_port)
print "Python Error: %s" % e
print "================================================\n"
self.metric_sender.add_metric({'openshift.master.skydns.port.open' : 0})
return False
def get_openshift_services(self):
""" Get a list of Openshift services that can be used to test against SkyDNS """
print "\nQuerying for Openshift services in the 'default' namespace...\n"
response = self.ora.get('/api/v1/namespaces/default/services')
for i in response['items']:
service = {}
service['name'] = i['metadata']['name']
service['namespace'] = i['metadata']['namespace']
service['ip'] = i['spec']['clusterIP']
self.openshift_services.append(service)
if self.args.verbose:
print "\nOpenshift Services found:\n"
print "{0:35} {1:25} {2:20}".format("Name", "Namespace", "IP")
for i in self.openshift_services:
print "{0:35} {1:25} {2:20}".format(i['name'], i['namespace'], i['ip'])
print "================================================\n"
def do_dns_check(self):
""" perform DNS checks against SkyDNS service """
print "\nPerforming DNS queries against SkyDNS...\n"
dns_resolver = resolver.Resolver(configure=False)
dns_resolver.nameservers.append(self.dns_host)
# Set dns_check to 1 (good) by default
dns_check = 1
for service in self.openshift_services:
name_to_resolve = service['name'] + '.' + service['namespace'] + '.svc.cluster.local'
try:
dns_answer = dns_resolver.query(name_to_resolve, 'A')
except dns_exception.DNSException as e:
print "Failed DNS lookup of %s. Error: %s" % (name_to_resolve, e)
print "\nTroubleshoot command: dig @%s %s A\n" % (self.dns_host, name_to_resolve)
dns_check = 0
break
if self.args.verbose:
print "\nQueryring for A record of %s on server %s" %(name_to_resolve, self.dns_host)
print "DNS Answer: %s" % dns_answer.rrset[0].address
print "Openshift Answer: %s" % service['ip']
if dns_answer.rrset[0].address != service['ip']:
dns_check = 0
print "================================================\n"
self.metric_sender.add_metric({'openshift.master.skydns.query' : dns_check})
if __name__ == '__main__':
OMSZC = OpenshiftSkyDNSZaggClient()
OMSZC.run()
| apache-2.0 | -2,316,351,571,682,302,500 | 37.092896 | 116 | 0.613255 | false |
CVL-GitHub/karaage | karaage/legacy/institutes/south_migrations/0002_move_instutute_models_to_institutes_app.py | 3 | 8280 | # -*- coding: utf-8 -*-
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (
("people", "0012_move_instutute_models_to_institutes_app"),
)
def forwards(self, orm):
# moved logic to karaage.people.migrations.0012_move_instutute_models_to_institutes_app
pass
def backwards(self, orm):
# moved logic to karaage.people.migrations.0012_move_instutute_models_to_institutes_app
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'institutes.institute': {
'Meta': {'ordering': "['name']", 'object_name': 'Institute', 'db_table': "'institute'"},
'delegates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'delegate'", 'to': "orm['people.Person']", 'through': "orm['institutes.InstituteDelegate']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'gid': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'saml_entityid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'institutes.institutedelegate': {
'Meta': {'object_name': 'InstituteDelegate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'people.person': {
'Meta': {'ordering': "['first_name', 'last_name']", 'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'saml_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['institutes']
| gpl-3.0 | 7,906,931,934,150,283,000 | 79.38835 | 250 | 0.550966 | false |
morphis/home-assistant | homeassistant/components/light/yeelight.py | 4 | 10411 | """
Support for Xiaomi Yeelight Wifi color bulb.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.yeelight/
"""
import logging
import colorsys
import voluptuous as vol
from homeassistant.util.color import (
color_temperature_mired_to_kelvin as mired_to_kelvin,
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_to_rgb)
from homeassistant.const import CONF_DEVICES, CONF_NAME
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_RGB_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP,
ATTR_FLASH, FLASH_SHORT, FLASH_LONG,
SUPPORT_BRIGHTNESS, SUPPORT_RGB_COLOR, SUPPORT_TRANSITION,
SUPPORT_COLOR_TEMP, SUPPORT_FLASH,
Light, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['yeelight==0.2.2']
_LOGGER = logging.getLogger(__name__)
CONF_TRANSITION = "transition"
DEFAULT_TRANSITION = 350
CONF_SAVE_ON_CHANGE = "save_on_change"
CONF_MODE_MUSIC = "use_music_mode"
DOMAIN = 'yeelight'
DEVICE_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int,
vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean,
vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, })
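# A configuration sketch for this platform (illustrative only; the IP address
# and entity name are assumptions, the option keys mirror DEVICE_SCHEMA above):
#
# light:
#   - platform: yeelight
#     devices:
#       192.168.1.25:
#         name: living_room
#         transition: 1000
#         use_music_mode: true
#         save_on_change: false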
SUPPORT_YEELIGHT_RGB = (SUPPORT_RGB_COLOR |
SUPPORT_COLOR_TEMP)
SUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS |
SUPPORT_TRANSITION |
SUPPORT_FLASH)
def _cmd(func):
"""A wrapper to catch exceptions from the bulb."""
def _wrap(self, *args, **kwargs):
import yeelight
try:
_LOGGER.debug("Calling %s with %s %s", func, args, kwargs)
return func(self, *args, **kwargs)
except yeelight.BulbException as ex:
_LOGGER.error("Error when calling %s: %s", func, ex)
return _wrap
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yeelight bulbs."""
lights = []
if discovery_info is not None:
_LOGGER.debug("Adding autodetected %s", discovery_info['hostname'])
# not using hostname, as it seems to vary.
name = "yeelight_%s_%s" % (discovery_info["device_type"],
discovery_info["properties"]["mac"])
device = {'name': name, 'ipaddr': discovery_info['host']}
lights.append(YeelightLight(device, DEVICE_SCHEMA({})))
else:
for ipaddr, device_config in config[CONF_DEVICES].items():
_LOGGER.debug("Adding configured %s", device_config[CONF_NAME])
device = {'name': device_config[CONF_NAME], 'ipaddr': ipaddr}
lights.append(YeelightLight(device, device_config))
add_devices(lights, True) # true to request an update before adding.
class YeelightLight(Light):
"""Representation of a Yeelight light."""
def __init__(self, device, config):
"""Initialize the light."""
self.config = config
self._name = device['name']
self._ipaddr = device['ipaddr']
self._supported_features = SUPPORT_YEELIGHT
self._available = False
self._bulb_device = None
self._brightness = None
self._color_temp = None
self._is_on = None
self._rgb = None
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def unique_id(self) -> str:
"""Return the ID of this light."""
return "{}.{}".format(self.__class__, self._ipaddr)
@property
def color_temp(self) -> int:
"""Return the color temperature."""
return self._color_temp
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._is_on
@property
def brightness(self) -> int:
"""Return the brightness of this light between 1..255."""
return self._brightness
def _get_rgb_from_properties(self):
rgb = self._properties.get("rgb", None)
color_mode = self._properties.get("color_mode", None)
if not rgb or not color_mode:
return rgb
color_mode = int(color_mode)
if color_mode == 2: # color temperature
return color_temperature_to_rgb(self.color_temp)
if color_mode == 3: # hsv
            # Yeelight reports hue 0-359, sat 0-100 and brightness 0-100 as
            # strings; colorsys expects floats in [0, 1] and returns the same.
            hue = int(self._properties.get("hue"))
            sat = int(self._properties.get("sat"))
            val = int(self._properties.get("bright"))
            return tuple(int(round(c * 255)) for c in
                         colorsys.hsv_to_rgb(hue / 360, sat / 100, val / 100))
rgb = int(rgb)
blue = rgb & 0xff
green = (rgb >> 8) & 0xff
red = (rgb >> 16) & 0xff
return red, green, blue
@property
def rgb_color(self) -> tuple:
"""Return the color property."""
return self._rgb
@property
def _properties(self) -> dict:
return self._bulb.last_properties
@property
def _bulb(self) -> object:
import yeelight
if self._bulb_device is None:
try:
self._bulb_device = yeelight.Bulb(self._ipaddr)
self._bulb_device.get_properties() # force init for type
btype = self._bulb_device.bulb_type
if btype == yeelight.BulbType.Color:
self._supported_features |= SUPPORT_YEELIGHT_RGB
self._available = True
except yeelight.BulbException as ex:
self._available = False
_LOGGER.error("Failed to connect to bulb %s, %s: %s",
self._ipaddr, self._name, ex)
return self._bulb_device
def set_music_mode(self, mode) -> None:
"""Set the music mode on or off."""
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music()
def update(self) -> None:
"""Update properties from the bulb."""
import yeelight
try:
self._bulb.get_properties()
self._is_on = self._properties.get("power") == "on"
bright = self._properties.get("bright", None)
if bright:
self._brightness = 255 * (int(bright) / 100)
temp_in_k = self._properties.get("ct", None)
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
self._rgb = self._get_rgb_from_properties()
self._available = True
except yeelight.BulbException as ex:
if self._available: # just inform once
_LOGGER.error("Unable to update bulb status: %s", ex)
self._available = False
@_cmd
def set_brightness(self, brightness, duration) -> None:
"""Set bulb brightness."""
if brightness:
_LOGGER.debug("Setting brightness: %s", brightness)
self._bulb.set_brightness(brightness / 255 * 100,
duration=duration)
@_cmd
def set_rgb(self, rgb, duration) -> None:
"""Set bulb's color."""
if rgb and self.supported_features & SUPPORT_RGB_COLOR:
_LOGGER.debug("Setting RGB: %s", rgb)
self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration)
@_cmd
def set_colortemp(self, colortemp, duration) -> None:
"""Set bulb's color temperature."""
if colortemp and self.supported_features & SUPPORT_COLOR_TEMP:
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug("Setting color temp: %s K", temp_in_k)
self._bulb.set_color_temp(temp_in_k, duration=duration)
@_cmd
def set_default(self) -> None:
"""Set current options as default."""
self._bulb.set_default()
@_cmd
def set_flash(self, flash) -> None:
"""Activate flash."""
if flash:
from yeelight import RGBTransition, SleepTransition, Flow
if self._bulb.last_properties["color_mode"] != 1:
_LOGGER.error("Flash supported currently only in RGB mode.")
return
transition = int(self.config[CONF_TRANSITION])
if flash == FLASH_LONG:
count = 1
duration = transition * 5
if flash == FLASH_SHORT:
count = 1
duration = transition * 2
red, green, blue = self.rgb_color
transitions = list()
transitions.append(
RGBTransition(255, 0, 0, brightness=10, duration=duration))
transitions.append(SleepTransition(
duration=transition))
transitions.append(
RGBTransition(red, green, blue, brightness=self.brightness,
duration=duration))
flow = Flow(count=count, transitions=transitions)
self._bulb.start_flow(flow)
def turn_on(self, **kwargs) -> None:
"""Turn the bulb on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
rgb = kwargs.get(ATTR_RGB_COLOR)
flash = kwargs.get(ATTR_FLASH)
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
self._bulb.turn_on(duration=duration)
if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
# values checked for none in methods
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
# save the current state if we had a manual change.
if self.config[CONF_SAVE_ON_CHANGE]:
if brightness or colortemp or rgb:
self.set_default()
def turn_off(self, **kwargs) -> None:
"""Turn off."""
self._bulb.turn_off()
| apache-2.0 | -7,655,247,535,868,886,000 | 32.261981 | 79 | 0.584862 | false |
yohanko88/gem5-DC | src/python/m5/params.py | 7 | 69061 | # Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Nathan Binkert
# Gabe Black
# Andreas Hansson
#####################################################################
#
# Parameter description classes
#
# The _params dictionary in each class maps parameter names to either
# a Param or a VectorParam object. These objects contain the
# parameter description string, the parameter type, and the default
# value (if any). The convert() method on these objects is used to
# force whatever value is assigned to the parameter to the appropriate
# type.
#
# Note that the default values are loaded into the class's attribute
# space when the parameter dictionary is initialized (in
# MetaSimObject._new_param()); after that point they aren't used.
#
#####################################################################
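# As a quick illustration (hypothetical SimObject, not part of this file),
# a configuration script declares and assigns parameters like so:
#
#     class MyDevice(SimObject):
#         type = 'MyDevice'
#         size = Param.MemorySize('512MB', "buffer size")
#         ranks = VectorParam.Int([0, 1], "rank ids")
#
#     dev = MyDevice(size='1GB')  # assignment coerced via ParamDesc.convert()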
import copy
import datetime
import re
import sys
import time
import math
import proxy
import ticks
from util import *
def isSimObject(*args, **kwargs):
return SimObject.isSimObject(*args, **kwargs)
def isSimObjectSequence(*args, **kwargs):
return SimObject.isSimObjectSequence(*args, **kwargs)
def isSimObjectClass(*args, **kwargs):
return SimObject.isSimObjectClass(*args, **kwargs)
allParams = {}
class MetaParamValue(type):
def __new__(mcls, name, bases, dct):
cls = super(MetaParamValue, mcls).__new__(mcls, name, bases, dct)
assert name not in allParams
allParams[name] = cls
return cls
# Dummy base class to identify types that are legitimate for SimObject
# parameters.
class ParamValue(object):
__metaclass__ = MetaParamValue
cmd_line_settable = False
# Generate the code needed as a prerequisite for declaring a C++
# object of this type. Typically generates one or more #include
# statements. Used when declaring parameters of this type.
@classmethod
def cxx_predecls(cls, code):
pass
# Generate the code needed as a prerequisite for including a
# reference to a C++ object of this type in a SWIG .i file.
# Typically generates one or more %import or %include statements.
@classmethod
def swig_predecls(cls, code):
pass
# default for printing to .ini file is regular string conversion.
# will be overridden in some cases
def ini_str(self):
return str(self)
# default for printing to .json file is regular string conversion.
# will be overridden in some cases, mostly to use native Python
# types where there are similar JSON types
def config_value(self):
return str(self)
# Prerequisites for .ini parsing with cxx_ini_parse
@classmethod
def cxx_ini_predecls(cls, code):
pass
# parse a .ini file entry for this param from string expression
# src into lvalue dest (of the param's C++ type)
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('// Unhandled param type: %s' % cls.__name__)
code('%s false;' % ret)
# allows us to blithely call unproxy() on things without checking
# if they're really proxies or not
def unproxy(self, base):
return self
# Produce a human readable version of the stored value
def pretty_print(self, value):
return str(value)
# Regular parameter description.
class ParamDesc(object):
def __init__(self, ptype_str, ptype, *args, **kwargs):
self.ptype_str = ptype_str
# remember ptype only if it is provided
if ptype != None:
self.ptype = ptype
if args:
if len(args) == 1:
self.desc = args[0]
elif len(args) == 2:
self.default = args[0]
self.desc = args[1]
else:
raise TypeError, 'too many arguments'
if kwargs.has_key('desc'):
assert(not hasattr(self, 'desc'))
self.desc = kwargs['desc']
del kwargs['desc']
if kwargs.has_key('default'):
assert(not hasattr(self, 'default'))
self.default = kwargs['default']
del kwargs['default']
if kwargs:
raise TypeError, 'extra unknown kwargs %s' % kwargs
if not hasattr(self, 'desc'):
raise TypeError, 'desc attribute missing'
def __getattr__(self, attr):
if attr == 'ptype':
ptype = SimObject.allClasses[self.ptype_str]
assert isSimObjectClass(ptype)
self.ptype = ptype
return ptype
raise AttributeError, "'%s' object has no attribute '%s'" % \
(type(self).__name__, attr)
def example_str(self):
if hasattr(self.ptype, "ex_str"):
return self.ptype.ex_str
else:
return self.ptype_str
# Is the param available to be exposed on the command line
def isCmdLineSettable(self):
if hasattr(self.ptype, "cmd_line_settable"):
return self.ptype.cmd_line_settable
else:
return False
def convert(self, value):
if isinstance(value, proxy.BaseProxy):
value.set_param_desc(self)
return value
if not hasattr(self, 'ptype') and isNullPointer(value):
# deferred evaluation of SimObject; continue to defer if
# we're just assigning a null pointer
return value
if isinstance(value, self.ptype):
return value
if isNullPointer(value) and isSimObjectClass(self.ptype):
return value
return self.ptype(value)
def pretty_print(self, value):
if isinstance(value, proxy.BaseProxy):
return str(value)
if isNullPointer(value):
return NULL
return self.ptype(value).pretty_print(value)
def cxx_predecls(self, code):
code('#include <cstddef>')
self.ptype.cxx_predecls(code)
def swig_predecls(self, code):
self.ptype.swig_predecls(code)
def cxx_decl(self, code):
code('${{self.ptype.cxx_type}} ${{self.name}};')
# Vector-valued parameter description. Just like ParamDesc, except
# that the value is a vector (list) of the specified type instead of a
# single value.
class VectorParamValue(list):
__metaclass__ = MetaParamValue
def __setattr__(self, attr, value):
raise AttributeError, \
"Not allowed to set %s on '%s'" % (attr, type(self).__name__)
def config_value(self):
return [v.config_value() for v in self]
def ini_str(self):
return ' '.join([v.ini_str() for v in self])
def getValue(self):
return [ v.getValue() for v in self ]
def unproxy(self, base):
if len(self) == 1 and isinstance(self[0], proxy.AllProxy):
return self[0].unproxy(base)
else:
return [v.unproxy(base) for v in self]
class SimObjectVector(VectorParamValue):
# support clone operation
def __call__(self, **kwargs):
return SimObjectVector([v(**kwargs) for v in self])
def clear_parent(self, old_parent):
for v in self:
v.clear_parent(old_parent)
def set_parent(self, parent, name):
if len(self) == 1:
self[0].set_parent(parent, name)
else:
width = int(math.ceil(math.log(len(self))/math.log(10)))
for i,v in enumerate(self):
v.set_parent(parent, "%s%0*d" % (name, width, i))
def has_parent(self):
return reduce(lambda x,y: x and y, [v.has_parent() for v in self])
# return 'cpu0 cpu1' etc. for print_ini()
def get_name(self):
return ' '.join([v._name for v in self])
# By iterating through the constituent members of the vector here
# we can nicely handle iterating over all a SimObject's children
# without having to provide lots of special functions on
# SimObjectVector directly.
def descendants(self):
for v in self:
for obj in v.descendants():
yield obj
def get_config_as_dict(self):
a = []
for v in self:
a.append(v.get_config_as_dict())
return a
# If we are replacing an item in the vector, make sure to set the
# parent reference of the new SimObject to be the same as the parent
# of the SimObject being replaced. Useful to have if we created
# a SimObjectVector of temporary objects that will be modified later in
# configuration scripts.
def __setitem__(self, key, value):
val = self[key]
if value.has_parent():
warn("SimObject %s already has a parent" % value.get_name() +\
" that is being overwritten by a SimObjectVector")
value.set_parent(val.get_parent(), val._name)
super(SimObjectVector, self).__setitem__(key, value)
# Enumerate the params of each member of the SimObject vector. Creates
# strings that will allow indexing into the vector by the python code and
# allow it to be specified on the command line.
def enumerateParams(self, flags_dict = {},
cmd_line_str = "",
access_str = ""):
if hasattr(self, "_paramEnumed"):
print "Cycle detected enumerating params at %s?!" % (cmd_line_str)
else:
x = 0
for vals in self:
# Each entry in the SimObjectVector should be an
# instance of a SimObject
flags_dict = vals.enumerateParams(flags_dict,
cmd_line_str + "%d." % x,
access_str + "[%d]." % x)
x = x + 1
return flags_dict
class VectorParamDesc(ParamDesc):
# Convert assigned value to appropriate type. If the RHS is not a
# list or tuple, it generates a single-element list.
def convert(self, value):
if isinstance(value, (list, tuple)):
# list: coerce each element into new list
tmp_list = [ ParamDesc.convert(self, v) for v in value ]
elif isinstance(value, str):
# If input is a csv string
tmp_list = [ ParamDesc.convert(self, v) \
for v in value.strip('[').strip(']').split(',') ]
else:
# singleton: coerce to a single-element list
tmp_list = [ ParamDesc.convert(self, value) ]
if isSimObjectSequence(tmp_list):
return SimObjectVector(tmp_list)
else:
return VectorParamValue(tmp_list)
# Produce a human readable example string that describes
# how to set this vector parameter in the absence of a default
# value.
def example_str(self):
s = super(VectorParamDesc, self).example_str()
help_str = "[" + s + "," + s + ", ...]"
return help_str
# Produce a human readable representation of the value of this vector param.
def pretty_print(self, value):
if isinstance(value, (list, tuple)):
tmp_list = [ ParamDesc.pretty_print(self, v) for v in value ]
elif isinstance(value, str):
tmp_list = [ ParamDesc.pretty_print(self, v) for v in value.split(',') ]
else:
tmp_list = [ ParamDesc.pretty_print(self, value) ]
return tmp_list
# This is a helper function for the new config system
def __call__(self, value):
if isinstance(value, (list, tuple)):
# list: coerce each element into new list
tmp_list = [ ParamDesc.convert(self, v) for v in value ]
elif isinstance(value, str):
# If input is a csv string
tmp_list = [ ParamDesc.convert(self, v) \
for v in value.strip('[').strip(']').split(',') ]
else:
# singleton: coerce to a single-element list
tmp_list = [ ParamDesc.convert(self, value) ]
return VectorParamValue(tmp_list)
def swig_module_name(self):
return "%s_vector" % self.ptype_str
def swig_predecls(self, code):
code('%import "${{self.swig_module_name()}}.i"')
def swig_decl(self, code):
code('%module(package="m5.internal") ${{self.swig_module_name()}}')
code('%{')
self.ptype.cxx_predecls(code)
code('%}')
code()
# Make sure the SWIGPY_SLICE_ARG is defined through this inclusion
code('%include "std_container.i"')
code()
self.ptype.swig_predecls(code)
code()
code('%include "std_vector.i"')
code()
ptype = self.ptype_str
cxx_type = self.ptype.cxx_type
code('%template(vector_$ptype) std::vector< $cxx_type >;')
def cxx_predecls(self, code):
code('#include <vector>')
self.ptype.cxx_predecls(code)
def cxx_decl(self, code):
code('std::vector< ${{self.ptype.cxx_type}} > ${{self.name}};')
class ParamFactory(object):
def __init__(self, param_desc_class, ptype_str = None):
self.param_desc_class = param_desc_class
self.ptype_str = ptype_str
def __getattr__(self, attr):
if self.ptype_str:
attr = self.ptype_str + '.' + attr
return ParamFactory(self.param_desc_class, attr)
# E.g., Param.Int(5, "number of widgets")
def __call__(self, *args, **kwargs):
ptype = None
try:
ptype = allParams[self.ptype_str]
except KeyError:
# if name isn't defined yet, assume it's a SimObject, and
# try to resolve it later
pass
return self.param_desc_class(self.ptype_str, ptype, *args, **kwargs)
Param = ParamFactory(ParamDesc)
VectorParam = ParamFactory(VectorParamDesc)
#####################################################################
#
# Parameter Types
#
# Though native Python types could be used to specify parameter types
# (the 'ptype' field of the Param and VectorParam classes), it's more
# flexible to define our own set of types. This gives us more control
# over how Python expressions are converted to values (via the
# __init__() constructor) and how these values are printed out (via
# the __str__() conversion method).
#
#####################################################################
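# A small example of the coercion and math behavior (hypothetical values):
#
#     i = Int('5')    # strings are parsed via convert.toInteger()
#     j = i * 2       # NumericParamValue math returns a new Int(10),
#                     # bounds-checked again through _check()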
# String-valued parameter. Just mixin the ParamValue class with the
# built-in str class.
class String(ParamValue,str):
cxx_type = 'std::string'
cmd_line_settable = True
@classmethod
def cxx_predecls(self, code):
code('#include <string>')
@classmethod
def swig_predecls(cls, code):
code('%include "std_string.i"')
def __call__(self, value):
self = value
return value
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s = %s;' % (dest, src))
code('%s true;' % ret)
def getValue(self):
return self
# superclass for "numeric" parameter values, to emulate math
# operations in a type-safe way. e.g., a Latency times an int returns
# a new Latency object.
class NumericParamValue(ParamValue):
def __str__(self):
return str(self.value)
def __float__(self):
return float(self.value)
def __long__(self):
return long(self.value)
def __int__(self):
return int(self.value)
# hook for bounds checking
def _check(self):
return
def __mul__(self, other):
newobj = self.__class__(self)
newobj.value *= other
newobj._check()
return newobj
__rmul__ = __mul__
def __div__(self, other):
newobj = self.__class__(self)
newobj.value /= other
newobj._check()
return newobj
def __sub__(self, other):
newobj = self.__class__(self)
newobj.value -= other
newobj._check()
return newobj
def config_value(self):
return self.value
@classmethod
def cxx_ini_predecls(cls, code):
# Assume that base/str.hh will be included anyway
# code('#include "base/str.hh"')
pass
# The default for parsing PODs from an .ini entry is to extract from an
# istringstream and let overloading choose the right type according to
# the dest type.
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s to_number(%s, %s);' % (ret, src, dest))
# Metaclass for bounds-checked integer parameters. See CheckedInt.
class CheckedIntType(MetaParamValue):
def __init__(cls, name, bases, dict):
super(CheckedIntType, cls).__init__(name, bases, dict)
# CheckedInt is an abstract base class, so we actually don't
# want to do any processing on it... the rest of this code is
# just for classes that derive from CheckedInt.
if name == 'CheckedInt':
return
if not (hasattr(cls, 'min') and hasattr(cls, 'max')):
if not (hasattr(cls, 'size') and hasattr(cls, 'unsigned')):
panic("CheckedInt subclass %s must define either\n" \
" 'min' and 'max' or 'size' and 'unsigned'\n",
name);
if cls.unsigned:
cls.min = 0
cls.max = 2 ** cls.size - 1
else:
cls.min = -(2 ** (cls.size - 1))
cls.max = (2 ** (cls.size - 1)) - 1
# Abstract superclass for bounds-checked integer parameters. This
# class is subclassed to generate parameter classes with specific
# bounds. Initialization of the min and max bounds is done in the
# metaclass CheckedIntType.__init__.
class CheckedInt(NumericParamValue):
__metaclass__ = CheckedIntType
cmd_line_settable = True
def _check(self):
if not self.min <= self.value <= self.max:
raise TypeError, 'Integer param out of bounds %d < %d < %d' % \
(self.min, self.value, self.max)
def __init__(self, value):
if isinstance(value, str):
self.value = convert.toInteger(value)
elif isinstance(value, (int, long, float, NumericParamValue)):
self.value = long(value)
else:
raise TypeError, "Can't convert object of type %s to CheckedInt" \
% type(value).__name__
self._check()
def __call__(self, value):
self.__init__(value)
return value
@classmethod
def cxx_predecls(cls, code):
# most derived types require this, so we just do it here once
code('#include "base/types.hh"')
@classmethod
def swig_predecls(cls, code):
# most derived types require this, so we just do it here once
code('%import "stdint.i"')
code('%import "base/types.hh"')
def getValue(self):
return long(self.value)
class Int(CheckedInt): cxx_type = 'int'; size = 32; unsigned = False
class Unsigned(CheckedInt): cxx_type = 'unsigned'; size = 32; unsigned = True
class Int8(CheckedInt): cxx_type = 'int8_t'; size = 8; unsigned = False
class UInt8(CheckedInt): cxx_type = 'uint8_t'; size = 8; unsigned = True
class Int16(CheckedInt): cxx_type = 'int16_t'; size = 16; unsigned = False
class UInt16(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class Int32(CheckedInt): cxx_type = 'int32_t'; size = 32; unsigned = False
class UInt32(CheckedInt): cxx_type = 'uint32_t'; size = 32; unsigned = True
class Int64(CheckedInt): cxx_type = 'int64_t'; size = 64; unsigned = False
class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True
class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True
class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True
class TcpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class Percent(CheckedInt): cxx_type = 'int'; min = 0; max = 100
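# Defining a new bounds-checked type is just a subclass declaration
# (illustrative; Nibble is not part of gem5). The CheckedIntType metaclass
# derives the bounds from size/unsigned:
#
#     class Nibble(CheckedInt): cxx_type = 'uint8_t'; size = 4; unsigned = True
#     # Nibble.min == 0, Nibble.max == 15, so Nibble(16) raises TypeError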
class Cycles(CheckedInt):
cxx_type = 'Cycles'
size = 64
unsigned = True
def getValue(self):
from m5.internal.core import Cycles
return Cycles(self.value)
@classmethod
def cxx_ini_predecls(cls, code):
# Assume that base/str.hh will be included anyway
# code('#include "base/str.hh"')
pass
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('uint64_t _temp;')
code('bool _ret = to_number(%s, _temp);' % src)
code('if (_ret)')
code(' %s = Cycles(_temp);' % dest)
code('%s _ret;' % ret)
class Float(ParamValue, float):
cxx_type = 'double'
cmd_line_settable = True
def __init__(self, value):
if isinstance(value, (int, long, float, NumericParamValue, Float, str)):
self.value = float(value)
else:
raise TypeError, "Can't convert object of type %s to Float" \
% type(value).__name__
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
return float(self.value)
def config_value(self):
return self
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class MemorySize(CheckedInt):
cxx_type = 'uint64_t'
ex_str = '512MB'
size = 64
unsigned = True
def __init__(self, value):
if isinstance(value, MemorySize):
self.value = value.value
else:
self.value = convert.toMemorySize(value)
self._check()
class MemorySize32(CheckedInt):
cxx_type = 'uint32_t'
ex_str = '512MB'
size = 32
unsigned = True
def __init__(self, value):
if isinstance(value, MemorySize):
self.value = value.value
else:
self.value = convert.toMemorySize(value)
self._check()
class Addr(CheckedInt):
cxx_type = 'Addr'
size = 64
unsigned = True
def __init__(self, value):
if isinstance(value, Addr):
self.value = value.value
else:
try:
# Often addresses are referred to with sizes. Ex: A device
# base address is at "512MB". Use toMemorySize() to convert
# these into addresses. If the address is not specified with a
# "size", an exception will occur and numeric translation will
# proceed below.
self.value = convert.toMemorySize(value)
except (TypeError, ValueError):
# Convert number to string and use long() to do automatic
# base conversion (requires base=0 for auto-conversion)
self.value = long(str(value), base=0)
self._check()
def __add__(self, other):
if isinstance(other, Addr):
return self.value + other.value
else:
return self.value + other
def pretty_print(self, value):
try:
val = convert.toMemorySize(value)
except TypeError:
val = long(value)
return "0x%x" % long(val)
class AddrRange(ParamValue):
cxx_type = 'AddrRange'
def __init__(self, *args, **kwargs):
# Disable interleaving and hashing by default
self.intlvHighBit = 0
self.xorHighBit = 0
self.intlvBits = 0
self.intlvMatch = 0
def handle_kwargs(self, kwargs):
# An address range needs to have an upper limit, specified
# either explicitly with an end, or as an offset using the
# size keyword.
if 'end' in kwargs:
self.end = Addr(kwargs.pop('end'))
elif 'size' in kwargs:
self.end = self.start + Addr(kwargs.pop('size')) - 1
else:
raise TypeError, "Either end or size must be specified"
# Now on to the optional bit
if 'intlvHighBit' in kwargs:
self.intlvHighBit = int(kwargs.pop('intlvHighBit'))
if 'xorHighBit' in kwargs:
self.xorHighBit = int(kwargs.pop('xorHighBit'))
if 'intlvBits' in kwargs:
self.intlvBits = int(kwargs.pop('intlvBits'))
if 'intlvMatch' in kwargs:
self.intlvMatch = int(kwargs.pop('intlvMatch'))
if len(args) == 0:
self.start = Addr(kwargs.pop('start'))
handle_kwargs(self, kwargs)
elif len(args) == 1:
if kwargs:
self.start = Addr(args[0])
handle_kwargs(self, kwargs)
elif isinstance(args[0], (list, tuple)):
self.start = Addr(args[0][0])
self.end = Addr(args[0][1])
else:
self.start = Addr(0)
self.end = Addr(args[0]) - 1
elif len(args) == 2:
self.start = Addr(args[0])
self.end = Addr(args[1])
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
def __str__(self):
return '%s:%s:%s:%s:%s:%s' \
% (self.start, self.end, self.intlvHighBit, self.xorHighBit,\
self.intlvBits, self.intlvMatch)
def size(self):
# Divide the size by the size of the interleaving slice
return (long(self.end) - long(self.start) + 1) >> self.intlvBits
@classmethod
def cxx_predecls(cls, code):
Addr.cxx_predecls(code)
code('#include "base/addr_range.hh"')
@classmethod
def swig_predecls(cls, code):
Addr.swig_predecls(code)
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('uint64_t _start, _end, _intlvHighBit = 0, _xorHighBit = 0;')
code('uint64_t _intlvBits = 0, _intlvMatch = 0;')
code('char _sep;')
code('std::istringstream _stream(${src});')
code('_stream >> _start;')
code('_stream.get(_sep);')
code('_stream >> _end;')
code('if (!_stream.fail() && !_stream.eof()) {')
code(' _stream.get(_sep);')
code(' _stream >> _intlvHighBit;')
code(' _stream.get(_sep);')
code(' _stream >> _xorHighBit;')
code(' _stream.get(_sep);')
code(' _stream >> _intlvBits;')
code(' _stream.get(_sep);')
code(' _stream >> _intlvMatch;')
code('}')
code('bool _ret = !_stream.fail() &&'
'_stream.eof() && _sep == \':\';')
code('if (_ret)')
code(' ${dest} = AddrRange(_start, _end, _intlvHighBit, \
_xorHighBit, _intlvBits, _intlvMatch);')
code('${ret} _ret;')
def getValue(self):
# Go from the Python class to the wrapped C++ class generated
# by swig
from m5.internal.range import AddrRange
return AddrRange(long(self.start), long(self.end),
int(self.intlvHighBit), int(self.xorHighBit),
int(self.intlvBits), int(self.intlvMatch))
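# A few ways to construct an AddrRange (illustrative; note that 'end' is
# stored inclusively):
#
#     AddrRange('1GB')                    # start 0, end 1GB - 1
#     AddrRange(0x1000, size='4kB')       # start plus a size keyword
#     AddrRange(start=0, end=0xffff, intlvBits=2, intlvMatch=1)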
# Boolean parameter type. Python doesn't let you subclass bool, since
# it doesn't want to let you create multiple instances of True and
# False. Thus this is a little more complicated than String.
class Bool(ParamValue):
cxx_type = 'bool'
cmd_line_settable = True
def __init__(self, value):
try:
self.value = convert.toBool(value)
except TypeError:
self.value = bool(value)
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
return bool(self.value)
def __str__(self):
return str(self.value)
# implement truth value testing for Bool parameters so that these params
# evaluate correctly during the python configuration phase
def __nonzero__(self):
return bool(self.value)
def ini_str(self):
if self.value:
return 'true'
return 'false'
def config_value(self):
return self.value
@classmethod
def cxx_ini_predecls(cls, code):
# Assume that base/str.hh will be included anyway
# code('#include "base/str.hh"')
pass
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('%s to_bool(%s, %s);' % (ret, src, dest))
def IncEthernetAddr(addr, val = 1):
bytes = map(lambda x: int(x, 16), addr.split(':'))
bytes[5] += val
for i in (5, 4, 3, 2, 1):
val,rem = divmod(bytes[i], 256)
bytes[i] = rem
if val == 0:
break
bytes[i - 1] += val
assert(bytes[0] <= 255)
return ':'.join(map(lambda x: '%02x' % x, bytes))
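# A quick check of the carry behavior (illustrative):
#
#     IncEthernetAddr('00:90:00:00:00:ff')     # -> '00:90:00:00:01:00'
#     IncEthernetAddr('00:90:00:00:00:01', 2)  # -> '00:90:00:00:00:03'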
_NextEthernetAddr = "00:90:00:00:00:01"
def NextEthernetAddr():
global _NextEthernetAddr
value = _NextEthernetAddr
_NextEthernetAddr = IncEthernetAddr(_NextEthernetAddr, 1)
return value
class EthernetAddr(ParamValue):
cxx_type = 'Net::EthAddr'
ex_str = "00:90:00:00:00:01"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, value):
if value == NextEthernetAddr:
self.value = value
return
if not isinstance(value, str):
raise TypeError, "expected an ethernet address and didn't get one"
bytes = value.split(':')
if len(bytes) != 6:
raise TypeError, 'invalid ethernet address %s' % value
for byte in bytes:
if not 0 <= int(byte, base=16) <= 0xff:
raise TypeError, 'invalid ethernet address %s' % value
self.value = value
def __call__(self, value):
self.__init__(value)
return value
def unproxy(self, base):
if self.value == NextEthernetAddr:
return EthernetAddr(self.value())
return self
def getValue(self):
from m5.internal.params import EthAddr
return EthAddr(self.value)
def __str__(self):
return self.value
def ini_str(self):
return self.value
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s = Net::EthAddr(%s);' % (dest, src))
code('%s true;' % ret)
# When initializing an IpAddress, pass in an existing IpAddress, a string of
# the form "a.b.c.d", or an integer representing an IP.
class IpAddress(ParamValue):
cxx_type = 'Net::IpAddress'
ex_str = "127.0.0.1"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, value):
if isinstance(value, IpAddress):
self.ip = value.ip
else:
try:
self.ip = convert.toIpAddress(value)
except TypeError:
self.ip = long(value)
self.verifyIp()
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
tup = [(self.ip >> i) & 0xff for i in (24, 16, 8, 0)]
return '%d.%d.%d.%d' % tuple(tup)
def __eq__(self, other):
if isinstance(other, IpAddress):
return self.ip == other.ip
elif isinstance(other, str):
try:
return self.ip == convert.toIpAddress(other)
except:
return False
else:
return self.ip == other
def __ne__(self, other):
return not (self == other)
def verifyIp(self):
if self.ip < 0 or self.ip >= (1 << 32):
raise TypeError, "invalid ip address %#08x" % self.ip
def getValue(self):
from m5.internal.params import IpAddress
return IpAddress(self.ip)
# When initializing an IpNetmask, pass in an existing IpNetmask, a string of
# the form "a.b.c.d/n" or "a.b.c.d/e.f.g.h", or an ip and netmask as
# positional or keyword arguments.
class IpNetmask(IpAddress):
cxx_type = 'Net::IpNetmask'
ex_str = "127.0.0.0/24"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, *args, **kwargs):
def handle_kwarg(self, kwargs, key, elseVal = None):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
elif elseVal:
setattr(self, key, elseVal)
else:
raise TypeError, "No value set for %s" % key
if len(args) == 0:
handle_kwarg(self, kwargs, 'ip')
handle_kwarg(self, kwargs, 'netmask')
elif len(args) == 1:
if kwargs:
if not 'ip' in kwargs and not 'netmask' in kwargs:
raise TypeError, "Invalid arguments"
handle_kwarg(self, kwargs, 'ip', args[0])
handle_kwarg(self, kwargs, 'netmask', args[0])
elif isinstance(args[0], IpNetmask):
self.ip = args[0].ip
self.netmask = args[0].netmask
else:
(self.ip, self.netmask) = convert.toIpNetmask(args[0])
elif len(args) == 2:
self.ip = args[0]
self.netmask = args[1]
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
self.verify()
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
return "%s/%d" % (super(IpNetmask, self).__str__(), self.netmask)
def __eq__(self, other):
if isinstance(other, IpNetmask):
return self.ip == other.ip and self.netmask == other.netmask
elif isinstance(other, str):
try:
return (self.ip, self.netmask) == convert.toIpNetmask(other)
except:
return False
else:
return False
def verify(self):
self.verifyIp()
if self.netmask < 0 or self.netmask > 32:
raise TypeError, "invalid netmask %d" % netmask
def getValue(self):
from m5.internal.params import IpNetmask
return IpNetmask(self.ip, self.netmask)
# When initializing an IpWithPort, pass in an existing IpWithPort, a string of
# the form "a.b.c.d:p", or an ip and port as positional or keyword arguments.
class IpWithPort(IpAddress):
cxx_type = 'Net::IpWithPort'
ex_str = "127.0.0.1:80"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, *args, **kwargs):
def handle_kwarg(self, kwargs, key, elseVal = None):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
elif elseVal:
setattr(self, key, elseVal)
else:
raise TypeError, "No value set for %s" % key
if len(args) == 0:
handle_kwarg(self, kwargs, 'ip')
handle_kwarg(self, kwargs, 'port')
elif len(args) == 1:
if kwargs:
if not 'ip' in kwargs and not 'port' in kwargs:
raise TypeError, "Invalid arguments"
handle_kwarg(self, kwargs, 'ip', args[0])
handle_kwarg(self, kwargs, 'port', args[0])
elif isinstance(args[0], IpWithPort):
self.ip = args[0].ip
self.port = args[0].port
else:
(self.ip, self.port) = convert.toIpWithPort(args[0])
elif len(args) == 2:
self.ip = args[0]
self.port = args[1]
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
self.verify()
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
return "%s:%d" % (super(IpWithPort, self).__str__(), self.port)
def __eq__(self, other):
if isinstance(other, IpWithPort):
return self.ip == other.ip and self.port == other.port
elif isinstance(other, str):
try:
return (self.ip, self.port) == convert.toIpWithPort(other)
except:
return False
else:
return False
def verify(self):
self.verifyIp()
if self.port < 0 or self.port > 0xffff:
raise TypeError, "invalid port %d" % self.port
def getValue(self):
from m5.internal.params import IpWithPort
return IpWithPort(self.ip, self.port)
time_formats = [ "%a %b %d %H:%M:%S %Z %Y",
"%a %b %d %H:%M:%S %Y",
"%Y/%m/%d %H:%M:%S",
"%Y/%m/%d %H:%M",
"%Y/%m/%d",
"%m/%d/%Y %H:%M:%S",
"%m/%d/%Y %H:%M",
"%m/%d/%Y",
"%m/%d/%y %H:%M:%S",
"%m/%d/%y %H:%M",
"%m/%d/%y"]
def parse_time(value):
from time import gmtime, strptime, struct_time, time
from datetime import datetime, date
if isinstance(value, struct_time):
return value
if isinstance(value, (int, long)):
return gmtime(value)
if isinstance(value, (datetime, date)):
return value.timetuple()
if isinstance(value, str):
if value in ('Now', 'Today'):
            return gmtime(time())
for format in time_formats:
try:
return strptime(value, format)
except ValueError:
pass
raise ValueError, "Could not parse '%s' as a time" % value
class Time(ParamValue):
cxx_type = 'tm'
@classmethod
def cxx_predecls(cls, code):
code('#include <time.h>')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/time.i"')
def __init__(self, value):
self.value = parse_time(value)
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
from m5.internal.params import tm
c_time = tm()
py_time = self.value
        # struct tm counts years since 1900
        c_time.tm_year = py_time.tm_year - 1900
        # Python months start at 1, struct tm starts at 0
        c_time.tm_mon = py_time.tm_mon - 1
        c_time.tm_mday = py_time.tm_mday
        c_time.tm_hour = py_time.tm_hour
        c_time.tm_min = py_time.tm_min
        c_time.tm_sec = py_time.tm_sec
        # Python has 0 as Monday, struct tm has 0 as Sunday
        c_time.tm_wday = py_time.tm_wday + 1
        if c_time.tm_wday > 6:
            c_time.tm_wday -= 7
        # Python yday starts at 1, struct tm starts at 0
        c_time.tm_yday = py_time.tm_yday - 1
return c_time
def __str__(self):
return time.asctime(self.value)
def ini_str(self):
return str(self)
def get_config_as_dict(self):
        assert False
return str(self)
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <time.h>')
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('char *_parse_ret = strptime((${src}).c_str(),')
code(' "%a %b %d %H:%M:%S %Y", &(${dest}));')
code('${ret} _parse_ret && *_parse_ret == \'\\0\';');
# Enumerated types are a little more complex. The user specifies the
# type as Enum(foo) where foo is either a list or dictionary of
# alternatives (typically strings, but not necessarily so). (In the
# long run, the integer value of the parameter will be the list index
# or the corresponding dictionary value. For now, since we only check
# that the alternative is valid and then spit it into a .ini file,
# there's not much point in using the dictionary.)
# What Enum() must do is generate a new type encapsulating the
# provided list/dictionary so that specific values of the parameter
# can be instances of that type. We define two hidden internal
# classes (_ListEnum and _DictEnum) to serve as base classes, then
# derive the new type from the appropriate base class on the fly.
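# For example, an illustrative subclass such as
#   class Color(Enum): vals = ['red', 'green', 'blue']
# yields the mapping {'red': 0, 'green': 1, 'blue': 2}, and Color('red')
# is then a valid parameter value.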
allEnums = {}
# Metaclass for Enum types
class MetaEnum(MetaParamValue):
def __new__(mcls, name, bases, dict):
assert name not in allEnums
cls = super(MetaEnum, mcls).__new__(mcls, name, bases, dict)
allEnums[name] = cls
return cls
def __init__(cls, name, bases, init_dict):
if init_dict.has_key('map'):
if not isinstance(cls.map, dict):
raise TypeError, "Enum-derived class attribute 'map' " \
"must be of type dict"
# build list of value strings from map
cls.vals = cls.map.keys()
cls.vals.sort()
elif init_dict.has_key('vals'):
if not isinstance(cls.vals, list):
raise TypeError, "Enum-derived class attribute 'vals' " \
"must be of type list"
# build string->value map from vals sequence
cls.map = {}
for idx,val in enumerate(cls.vals):
cls.map[val] = idx
else:
raise TypeError, "Enum-derived class must define "\
"attribute 'map' or 'vals'"
cls.cxx_type = 'Enums::%s' % name
super(MetaEnum, cls).__init__(name, bases, init_dict)
# Generate C++ class declaration for this enum type.
# Note that we wrap the enum in a class/struct to act as a namespace,
# so that the enum strings can be brief w/o worrying about collisions.
def cxx_decl(cls, code):
wrapper_name = cls.wrapper_name
wrapper = 'struct' if cls.wrapper_is_struct else 'namespace'
name = cls.__name__ if cls.enum_name is None else cls.enum_name
idem_macro = '__ENUM__%s__%s__' % (wrapper_name, name)
code('''\
#ifndef $idem_macro
#define $idem_macro
$wrapper $wrapper_name {
enum $name {
''')
code.indent(2)
for val in cls.vals:
code('$val = ${{cls.map[val]}},')
code('Num_$name = ${{len(cls.vals)}}')
code.dedent(2)
code(' };')
if cls.wrapper_is_struct:
code(' static const char *${name}Strings[Num_${name}];')
code('};')
else:
code('extern const char *${name}Strings[Num_${name}];')
code('}')
code()
code('#endif // $idem_macro')
def cxx_def(cls, code):
wrapper_name = cls.wrapper_name
file_name = cls.__name__
name = cls.__name__ if cls.enum_name is None else cls.enum_name
code('#include "enums/$file_name.hh"')
if cls.wrapper_is_struct:
code('const char *${wrapper_name}::${name}Strings'
'[Num_${name}] =')
else:
code('namespace Enums {')
code.indent(1)
code(' const char *${name}Strings[Num_${name}] =')
code('{')
code.indent(1)
for val in cls.vals:
code('"$val",')
code.dedent(1)
code('};')
if not cls.wrapper_is_struct:
code('} // namespace $wrapper_name')
code.dedent(1)
def swig_decl(cls, code):
name = cls.__name__
code('''\
%module(package="m5.internal") enum_$name
%{
#include "enums/$name.hh"
%}
%include "enums/$name.hh"
''')
# Base class for enum types.
class Enum(ParamValue):
__metaclass__ = MetaEnum
vals = []
cmd_line_settable = True
# The name of the wrapping namespace or struct
wrapper_name = 'Enums'
# If true, the enum is wrapped in a struct rather than a namespace
wrapper_is_struct = False
# If not None, use this as the enum name rather than this class name
enum_name = None
def __init__(self, value):
if value not in self.map:
raise TypeError, "Enum param got bad value '%s' (not in %s)" \
% (value, self.vals)
self.value = value
def __call__(self, value):
self.__init__(value)
return value
@classmethod
def cxx_predecls(cls, code):
code('#include "enums/$0.hh"', cls.__name__)
@classmethod
def swig_predecls(cls, code):
code('%import "python/m5/internal/enum_$0.i"', cls.__name__)
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('if (false) {')
for elem_name in cls.map.iterkeys():
code('} else if (%s == "%s") {' % (src, elem_name))
code.indent()
code('%s = Enums::%s;' % (dest, elem_name))
code('%s true;' % ret)
code.dedent()
code('} else {')
code(' %s false;' % ret)
code('}')
def getValue(self):
return int(self.map[self.value])
def __str__(self):
return self.value
# how big does a rounding error need to be before we warn about it?
frequency_tolerance = 0.001 # 0.1%
class TickParamValue(NumericParamValue):
cxx_type = 'Tick'
ex_str = "1MHz"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/types.hh"')
@classmethod
def swig_predecls(cls, code):
code('%import "stdint.i"')
code('%import "base/types.hh"')
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
return long(self.value)
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
# Ticks are expressed in seconds in JSON files and in plain
# Ticks in .ini files. Switch based on a config flag
@classmethod
    def cxx_ini_parse(cls, code, src, dest, ret):
code('${ret} to_number(${src}, ${dest});')
class Latency(TickParamValue):
ex_str = "100ns"
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
self.ticks = value.ticks
self.value = value.value
elif isinstance(value, Frequency):
self.ticks = value.ticks
self.value = 1.0 / value.value
elif value.endswith('t'):
self.ticks = True
self.value = int(value[:-1])
else:
self.ticks = False
self.value = convert.toLatency(value)
def __call__(self, value):
self.__init__(value)
return value
def __getattr__(self, attr):
if attr in ('latency', 'period'):
return self
if attr == 'frequency':
return Frequency(self)
raise AttributeError, "Latency object has no attribute '%s'" % attr
def getValue(self):
if self.ticks or self.value == 0:
value = self.value
else:
value = ticks.fromSeconds(self.value)
return long(value)
def config_value(self):
return self.getValue()
# convert latency to ticks
def ini_str(self):
return '%d' % self.getValue()
class Frequency(TickParamValue):
ex_str = "1GHz"
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
if value.value == 0:
self.value = 0
else:
self.value = 1.0 / value.value
self.ticks = value.ticks
elif isinstance(value, Frequency):
self.value = value.value
self.ticks = value.ticks
else:
self.ticks = False
self.value = convert.toFrequency(value)
def __call__(self, value):
self.__init__(value)
return value
def __getattr__(self, attr):
if attr == 'frequency':
return self
if attr in ('latency', 'period'):
return Latency(self)
raise AttributeError, "Frequency object has no attribute '%s'" % attr
# convert latency to ticks
def getValue(self):
if self.ticks or self.value == 0:
value = self.value
else:
value = ticks.fromSeconds(1.0 / self.value)
return long(value)
def config_value(self):
return self.getValue()
def ini_str(self):
return '%d' % self.getValue()
# A generic Frequency and/or Latency value. Value is stored as a
# latency, just like Latency and Frequency.
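# For example (illustrative values), Clock('1GHz'), Clock('1ns') and
# Clock('500t') are all accepted; each is stored internally as a period.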
class Clock(TickParamValue):
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
self.ticks = value.ticks
self.value = value.value
elif isinstance(value, Frequency):
self.ticks = value.ticks
self.value = 1.0 / value.value
elif value.endswith('t'):
self.ticks = True
self.value = int(value[:-1])
else:
self.ticks = False
self.value = convert.anyToLatency(value)
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
return "%s" % Latency(self)
def __getattr__(self, attr):
if attr == 'frequency':
return Frequency(self)
if attr in ('latency', 'period'):
return Latency(self)
raise AttributeError, "Frequency object has no attribute '%s'" % attr
def getValue(self):
return self.period.getValue()
def config_value(self):
return self.period.config_value()
def ini_str(self):
return self.period.ini_str()
class Voltage(float,ParamValue):
cxx_type = 'double'
ex_str = "1V"
cmd_line_settable = True
def __new__(cls, value):
# convert to voltage
val = convert.toVoltage(value)
        return super(Voltage, cls).__new__(cls, val)
def __call__(self, value):
val = convert.toVoltage(value)
self.__init__(val)
return value
def __str__(self):
return str(self.getValue())
def getValue(self):
value = float(self)
return value
def ini_str(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
    def cxx_ini_parse(cls, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class Current(float, ParamValue):
cxx_type = 'double'
ex_str = "1mA"
cmd_line_settable = False
def __new__(cls, value):
# convert to current
val = convert.toCurrent(value)
        return super(Current, cls).__new__(cls, val)
def __call__(self, value):
val = convert.toCurrent(value)
self.__init__(val)
return value
def __str__(self):
return str(self.getValue())
def getValue(self):
value = float(self)
return value
def ini_str(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
    def cxx_ini_parse(cls, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class NetworkBandwidth(float,ParamValue):
cxx_type = 'float'
ex_str = "1Gbps"
cmd_line_settable = True
def __new__(cls, value):
# convert to bits per second
val = convert.toNetworkBandwidth(value)
        return super(NetworkBandwidth, cls).__new__(cls, val)
def __str__(self):
        return str(float(self))
def __call__(self, value):
val = convert.toNetworkBandwidth(value)
self.__init__(val)
return value
def getValue(self):
# convert to seconds per byte
value = 8.0 / float(self)
# convert to ticks per byte
value = ticks.fromSeconds(value)
return float(value)
def ini_str(self):
return '%f' % self.getValue()
def config_value(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
    def cxx_ini_parse(cls, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class MemoryBandwidth(float,ParamValue):
cxx_type = 'float'
ex_str = "1GB/s"
cmd_line_settable = True
def __new__(cls, value):
# convert to bytes per second
val = convert.toMemoryBandwidth(value)
        return super(MemoryBandwidth, cls).__new__(cls, val)
def __call__(self, value):
val = convert.toMemoryBandwidth(value)
self.__init__(val)
return value
def getValue(self):
# convert to seconds per byte
value = float(self)
if value:
value = 1.0 / float(self)
# convert to ticks per byte
value = ticks.fromSeconds(value)
return float(value)
def ini_str(self):
return '%f' % self.getValue()
def config_value(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
    def cxx_ini_parse(cls, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
#
# "Constants"... handy aliases for various values.
#
# Special class for NULL pointers. Note the special check in
# make_param_value() above that lets these be assigned where a
# SimObject is required.
# only one copy of a particular node
class NullSimObject(object):
__metaclass__ = Singleton
def __call__(cls):
return cls
def _instantiate(self, parent = None, path = ''):
pass
def ini_str(self):
return 'Null'
def unproxy(self, base):
return self
def set_path(self, parent, name):
pass
def __str__(self):
return 'Null'
def config_value(self):
return None
def getValue(self):
return None
# The only instance you'll ever need...
NULL = NullSimObject()
def isNullPointer(value):
return isinstance(value, NullSimObject)
# Some memory range specifications use this as a default upper bound.
MaxAddr = Addr.max
MaxTick = Tick.max
AllMemory = AddrRange(0, MaxAddr)
#####################################################################
#
# Port objects
#
# Ports are used to interconnect objects in the memory system.
#
#####################################################################
# Port reference: encapsulates a reference to a particular port on a
# particular SimObject.
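# For example (illustrative, hypothetical object names), a connection is
# typically written as an assignment between two port references:
#   system.cpu.icache_port = system.l2cache.cpu_side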
class PortRef(object):
def __init__(self, simobj, name, role):
assert(isSimObject(simobj) or isSimObjectClass(simobj))
self.simobj = simobj
self.name = name
self.role = role
self.peer = None # not associated with another port yet
self.ccConnected = False # C++ port connection done?
self.index = -1 # always -1 for non-vector ports
def __str__(self):
return '%s.%s' % (self.simobj, self.name)
def __len__(self):
        # Return the number of connected ports, i.e. 0 if we have no
        # peer and 1 if we do.
        return int(self.peer is not None)
# for config.ini, print peer's name (not ours)
def ini_str(self):
return str(self.peer)
# for config.json
def get_config_as_dict(self):
return {'role' : self.role, 'peer' : str(self.peer)}
def __getattr__(self, attr):
if attr == 'peerObj':
# shorthand for proxies
return self.peer.simobj
raise AttributeError, "'%s' object has no attribute '%s'" % \
(self.__class__.__name__, attr)
# Full connection is symmetric (both ways). Called via
# SimObject.__setattr__ as a result of a port assignment, e.g.,
# "obj1.portA = obj2.portB", or via VectorPortElementRef.__setitem__,
# e.g., "obj1.portA[3] = obj2.portB".
def connect(self, other):
if isinstance(other, VectorPortRef):
# reference to plain VectorPort is implicit append
other = other._get_next()
if self.peer and not proxy.isproxy(self.peer):
fatal("Port %s is already connected to %s, cannot connect %s\n",
self, self.peer, other);
self.peer = other
if proxy.isproxy(other):
other.set_param_desc(PortParamDesc())
elif isinstance(other, PortRef):
if other.peer is not self:
other.connect(self)
else:
raise TypeError, \
"assigning non-port reference '%s' to port '%s'" \
% (other, self)
# Allow a master/slave port pair to be spliced between
# a port and its connected peer. Useful operation for connecting
# instrumentation structures into a system when it is necessary
# to connect the instrumentation after the full system has been
# constructed.
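    # For example (illustrative, hypothetical names): to slip a monitor
    # between an already-connected pair of ports, one might write
    #   port_ref.splice(monitor.master, monitor.slave)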
def splice(self, new_master_peer, new_slave_peer):
if self.peer and not proxy.isproxy(self.peer):
if isinstance(new_master_peer, PortRef) and \
isinstance(new_slave_peer, PortRef):
old_peer = self.peer
if self.role == 'SLAVE':
self.peer = new_master_peer
old_peer.peer = new_slave_peer
new_master_peer.connect(self)
new_slave_peer.connect(old_peer)
elif self.role == 'MASTER':
self.peer = new_slave_peer
old_peer.peer = new_master_peer
new_slave_peer.connect(self)
new_master_peer.connect(old_peer)
else:
panic("Port %s has unknown role, "+\
"cannot splice in new peers\n", self)
else:
raise TypeError, \
"Splicing non-port references '%s','%s' to port '%s'"\
                  % (new_master_peer, new_slave_peer, self)
else:
fatal("Port %s not connected, cannot splice in new peers\n", self)
def clone(self, simobj, memo):
if memo.has_key(self):
return memo[self]
newRef = copy.copy(self)
memo[self] = newRef
newRef.simobj = simobj
assert(isSimObject(newRef.simobj))
if self.peer and not proxy.isproxy(self.peer):
peerObj = self.peer.simobj(_memo=memo)
newRef.peer = self.peer.clone(peerObj, memo)
assert(not isinstance(newRef.peer, VectorPortRef))
return newRef
def unproxy(self, simobj):
assert(simobj is self.simobj)
if proxy.isproxy(self.peer):
try:
realPeer = self.peer.unproxy(self.simobj)
except:
print "Error in unproxying port '%s' of %s" % \
(self.name, self.simobj.path())
raise
self.connect(realPeer)
# Call C++ to create corresponding port connection between C++ objects
def ccConnect(self):
from m5.internal.pyobject import connectPorts
if self.role == 'SLAVE':
# do nothing and let the master take care of it
return
if self.ccConnected: # already done this
return
peer = self.peer
if not self.peer: # nothing to connect to
return
# check that we connect a master to a slave
if self.role == peer.role:
raise TypeError, \
"cannot connect '%s' and '%s' due to identical role '%s'" \
% (peer, self, self.role)
try:
# self is always the master and peer the slave
connectPorts(self.simobj.getCCObject(), self.name, self.index,
peer.simobj.getCCObject(), peer.name, peer.index)
except:
print "Error connecting port %s.%s to %s.%s" % \
(self.simobj.path(), self.name,
peer.simobj.path(), peer.name)
raise
self.ccConnected = True
peer.ccConnected = True
# A reference to an individual element of a VectorPort... much like a
# PortRef, but has an index.
class VectorPortElementRef(PortRef):
def __init__(self, simobj, name, role, index):
PortRef.__init__(self, simobj, name, role)
self.index = index
def __str__(self):
return '%s.%s[%d]' % (self.simobj, self.name, self.index)
# A reference to a complete vector-valued port (not just a single element).
# Can be indexed to retrieve individual VectorPortElementRef instances.
class VectorPortRef(object):
def __init__(self, simobj, name, role):
assert(isSimObject(simobj) or isSimObjectClass(simobj))
self.simobj = simobj
self.name = name
self.role = role
self.elements = []
def __str__(self):
return '%s.%s[:]' % (self.simobj, self.name)
def __len__(self):
        # Return the number of connected peers, corresponding to the
# length of the elements.
return len(self.elements)
# for config.ini, print peer's name (not ours)
def ini_str(self):
return ' '.join([el.ini_str() for el in self.elements])
# for config.json
def get_config_as_dict(self):
return {'role' : self.role,
'peer' : [el.ini_str() for el in self.elements]}
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError, "VectorPort index must be integer"
if key >= len(self.elements):
# need to extend list
ext = [VectorPortElementRef(self.simobj, self.name, self.role, i)
for i in range(len(self.elements), key+1)]
self.elements.extend(ext)
return self.elements[key]
def _get_next(self):
return self[len(self.elements)]
def __setitem__(self, key, value):
if not isinstance(key, int):
raise TypeError, "VectorPort index must be integer"
self[key].connect(value)
def connect(self, other):
if isinstance(other, (list, tuple)):
# Assign list of port refs to vector port.
# For now, append them... not sure if that's the right semantics
# or if it should replace the current vector.
for ref in other:
self._get_next().connect(ref)
else:
# scalar assignment to plain VectorPort is implicit append
self._get_next().connect(other)
def clone(self, simobj, memo):
if memo.has_key(self):
return memo[self]
newRef = copy.copy(self)
memo[self] = newRef
newRef.simobj = simobj
assert(isSimObject(newRef.simobj))
newRef.elements = [el.clone(simobj, memo) for el in self.elements]
return newRef
def unproxy(self, simobj):
[el.unproxy(simobj) for el in self.elements]
def ccConnect(self):
[el.ccConnect() for el in self.elements]
# Port description object. Like a ParamDesc object, this represents a
# logical port in the SimObject class, not a particular port on a
# SimObject instance. The latter are represented by PortRef objects.
class Port(object):
# Generate a PortRef for this port on the given SimObject with the
# given name
def makeRef(self, simobj):
return PortRef(simobj, self.name, self.role)
# Connect an instance of this port (on the given SimObject with
# the given name) with the port described by the supplied PortRef
def connect(self, simobj, ref):
self.makeRef(simobj).connect(ref)
# No need for any pre-declarations at the moment as we merely rely
# on an unsigned int.
def cxx_predecls(self, code):
pass
# Declare an unsigned int with the same name as the port, that
# will eventually hold the number of connected ports (and thus the
# number of elements for a VectorPort).
def cxx_decl(self, code):
code('unsigned int port_${{self.name}}_connection_count;')
class MasterPort(Port):
# MasterPort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'MASTER'
else:
raise TypeError, 'wrong number of arguments'
class SlavePort(Port):
# SlavePort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'SLAVE'
else:
raise TypeError, 'wrong number of arguments'
# VectorPort description object. Like Port, but represents a vector
# of connections (e.g., as on a XBar).
class VectorPort(Port):
def __init__(self, *args):
self.isVec = True
def makeRef(self, simobj):
return VectorPortRef(simobj, self.name, self.role)
class VectorMasterPort(VectorPort):
# VectorMasterPort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'MASTER'
VectorPort.__init__(self, *args)
else:
raise TypeError, 'wrong number of arguments'
class VectorSlavePort(VectorPort):
# VectorSlavePort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'SLAVE'
VectorPort.__init__(self, *args)
else:
raise TypeError, 'wrong number of arguments'
# 'Fake' ParamDesc for Port references to assign to the _pdesc slot of
# proxy objects (via set_param_desc()) so that proxy error messages
# make sense.
class PortParamDesc(object):
__metaclass__ = Singleton
ptype_str = 'Port'
ptype = Port
baseEnums = allEnums.copy()
baseParams = allParams.copy()
def clear():
global allEnums, allParams
allEnums = baseEnums.copy()
allParams = baseParams.copy()
__all__ = ['Param', 'VectorParam',
'Enum', 'Bool', 'String', 'Float',
'Int', 'Unsigned', 'Int8', 'UInt8', 'Int16', 'UInt16',
'Int32', 'UInt32', 'Int64', 'UInt64',
'Counter', 'Addr', 'Tick', 'Percent',
'TcpPort', 'UdpPort', 'EthernetAddr',
'IpAddress', 'IpNetmask', 'IpWithPort',
'MemorySize', 'MemorySize32',
'Latency', 'Frequency', 'Clock', 'Voltage',
'NetworkBandwidth', 'MemoryBandwidth',
'AddrRange',
'MaxAddr', 'MaxTick', 'AllMemory',
'Time',
'NextEthernetAddr', 'NULL',
'MasterPort', 'SlavePort',
'VectorMasterPort', 'VectorSlavePort']
import SimObject
| bsd-3-clause | -1,208,349,046,389,557,500 | 31.40779 | 84 | 0.574666 | false |
h8rift/android_kernel_htc_msm8960-evita-1_85 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 | 4,573,170,353,502,690,000 | 29.52439 | 78 | 0.642829 | false |
mikebrevard/UnixAdministration | vagrant/etc/data/genData/venv/lib/python3.4/site-packages/setuptools/depends.py | 462 | 6370 | import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools import compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
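# For example (illustrative): a requirement on a module that exposes a
# __version__ attribute can be declared and checked like this:
#   req = Require('Distutils', '1.0.3', 'distutils')
#   if req.is_present() and req.is_current():
#       pass  # dependency satisfied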
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
    bytes = array('B', code.co_code)  # unsigned, so opcodes >= 128 compare correctly
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * compat.long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
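# For example (illustrative): given a code object compiled from the source
# "version = '1.0'", extract_constant(code, 'version') returns '1.0'.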
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| mit | 1,575,771,434,378,027,500 | 28.627907 | 78 | 0.599215 | false |
arkmaxim/grpc | src/python/grpcio/grpc/framework/foundation/logging_pool.py | 21 | 3030 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A thread pool that logs exceptions raised by tasks executed within it."""
import logging
from concurrent import futures
def _wrap(behavior):
"""Wraps an arbitrary callable behavior in exception-logging."""
def _wrapping(*args, **kwargs):
try:
return behavior(*args, **kwargs)
except Exception as e:
logging.exception(
'Unexpected exception from %s executed in logging pool!', behavior)
raise
return _wrapping
class _LoggingPool(object):
"""An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
def __init__(self, backing_pool):
self._backing_pool = backing_pool
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._backing_pool.shutdown(wait=True)
def submit(self, fn, *args, **kwargs):
return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
def map(self, func, *iterables, **kwargs):
return self._backing_pool.map(
_wrap(func), *iterables, timeout=kwargs.get('timeout', None))
def shutdown(self, wait=True):
self._backing_pool.shutdown(wait=wait)
def pool(max_workers):
"""Creates a thread pool that logs exceptions raised by the tasks within it.
Args:
max_workers: The maximum number of worker threads to allow the pool.
Returns:
A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
raised by the tasks executed within it.
"""
return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
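# For example (illustrative; some_task is a hypothetical callable):
#   with pool(max_workers=4) as executor:
#       future = executor.submit(some_task)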
| bsd-3-clause | 8,552,951,231,461,415,000 | 35.95122 | 79 | 0.738614 | false |
pingpan2013/sensor-box-project | sensor_project/sensor_box.py | 1 | 6325 | #!/usr/bin/env python
#
# File Name: sensor_box.py
#
# Desc:
#    Control the sensors to get humidity and moisture information.
#    If the Internet is down, store the results in local files;
#    otherwise send the data to the database.
#
import os
import time
import datetime
import logging
import subprocess
import RPi.GPIO as GPIO
import conf
from led import turn_LED_on, turn_LED_off, LED_YELLOW, LED_GREEN
import server_conn
import log_management as log_m
import csv_data
import mois_sensor
import humi_sensor
import curr_sensor
import temp_sensor
import water_level
GPIO.setwarnings(False)
def main():
'''
The main process of this project:
1. Get data from sensors, including humidity/moisture/temperature
2. Store the data locally
3. Check if Internet is available; if so, send the data to the server
4. Wait to restart cycle
'''
# Some preparation work
# Use yellow LED to indicate code is running
turn_LED_on(LED_YELLOW)
internet_working = True
humidity = None
temp_f = None
moistures = None
temps = None
water_depth = None
# Time spent waiting for sensor values with reduced noise
sensor_reading_time = conf.water_level_interval
# Ensure required picture directories exist
if not os.path.exists(conf.online_pictures_folder):
os.makedirs(conf.online_pictures_folder)
if not os.path.exists(conf.offline_pictures_folder):
os.makedirs(conf.offline_pictures_folder)
# Initial stuff at the top of the log file.
log_m.start_log()
# Begin main program loop
while True:
# Get current timestamp first
now = datetime.datetime.now()
picture_filename_timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
mysql_timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
csv_timestamp = now.strftime('%m/%d/%y %I:%M %p')
pic_filename = str(picture_filename_timestamp) + '.jpg'
pic_path = conf.offline_pictures_folder + pic_filename
logging.info('Beginning data collection cycle')
# STEP 1: Get all data from sensors
try:
if conf.using_humidity_sensor:
humidity, temp_f = humi_sensor.get_humidity_and_temp()
logging.debug("Humidity: %.1f %%" % humidity)
logging.debug("Internal Temperature: %.1f F" % temp_f)
if conf.num_moisture_sensors > 0:
moistures = mois_sensor.get_moistures(conf.num_moisture_sensors)
sensor_chr = 'A'
for moisture in moistures:
logging.debug("Moisture " + sensor_chr + " = " + str(moisture))
sensor_chr = chr(ord(sensor_chr) + 1)
if len(moistures) != conf.num_moisture_sensors:
logging.error("Number of moistures doesn't match conf")
if conf.num_temp_sensors > 0:
temps = temp_sensor.get_temp_data_f()
sensor_chr = 'A'
for temp in temps:
logging.debug("Temperature " + sensor_chr + " = " + str(temp))
sensor_chr = chr(ord(sensor_chr) + 1)
if len(temps) != conf.num_temp_sensors:
logging.error("Number of temperatures doesn't match conf")
if conf.using_water_level_sensor:
water_depth = water_level.get_inches(conf.water_level_interval)
except:
logging.exception("Exception occurred while reading sensors")
logging.info('Gathered data')
if conf.using_camera:
subprocess.call(['raspistill', '-o', pic_path])
logging.debug('Took picture ' + pic_filename)
# STEP 2: Store data locally
# Initialize CSV file if not present
csv_data.initialize()
# Add new line of data to CSV file
csv_data.write_data(temp_f,
humidity,
moistures,
temps,
water_depth,
csv_timestamp
)
logging.debug("Next reading to be collected in "
+ str(float(conf.period)/60.0) + " minutes")
time.sleep((conf.period - sensor_reading_time)/2)
# STEP 3: Send data to the server and database if Internet is available
try:
if server_conn.internet_on():
if not internet_working:
logging.warning('Internet restored; sending data...')
internet_working = True
else:
logging.info('Sending data...')
# Turn on green LED to indicate Internet usage
turn_LED_on(LED_GREEN)
if conf.using_camera:
try:
# store pictures to FTP server
server_conn.store_data_to_ftp(pic_filename)
os.system("rm -f " + conf.offline_pictures_folder + '*')
except IOError:
logging.exception('Could not send picture file')
try:
# store data in database
server_conn.store_data_to_db(temp_f,
humidity,
moistures,
temps,
water_depth,
mysql_timestamp
)
except:
logging.exception('Exception occurred with database code')
logging.info('Data sent')
else:
if internet_working:
logging.warning('Internet is down; could not send data')
internet_working = False
except:
logging.exception('Exception occurred when trying to send data')
turn_LED_off(LED_GREEN)
# STEP 4: Wait until cycle starts over again
time.sleep((conf.period - sensor_reading_time)/2)
# End of main loop
turn_LED_off(LED_YELLOW)
logging.info('Exited main loop. Stopping data recording')
if __name__ == '__main__':
main()
| gpl-3.0 | 153,513,993,433,730,660 | 38.285714 | 83 | 0.543083 | false |
ict-felix/stack | modules/resource/manager/transit-network/src/proxy_interface.py | 2 | 1172 | # Copyright 2014-2015 National Institute of Advanced Industrial Science and Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from abc import ABCMeta, abstractmethod
# class Handler(SimpleXMLRPCRequestHandler):
class Proxy:
__metaclass__ = ABCMeta
@abstractmethod
    def reserve(self, resv):
pass
@abstractmethod
    def modify(self, resv, end_time_sec):
pass
@abstractmethod
    def provision(self, resv):
pass
@abstractmethod
    def release(self, resv):
pass
@abstractmethod
    def terminate(self, resv):
pass
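# For example (illustrative): a concrete proxy subclasses Proxy and
# implements each abstract operation, e.g.
#   class DummyProxy(Proxy):
#       def reserve(self, resv):
#           pass
#   (and likewise for modify, provision, release and terminate)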
| apache-2.0 | 3,965,946,899,514,179,600 | 26.904762 | 86 | 0.734642 | false |
apophys/freeipa | ipaclient/remote_plugins/2_156/netgroup.py | 16 | 24373 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Netgroups
A netgroup is a group used for permission checking. It can contain both
user and host values.
EXAMPLES:
Add a new netgroup:
ipa netgroup-add --desc="NFS admins" admins
Add members to the netgroup:
ipa netgroup-add-member --users=tuser1 --users=tuser2 admins
Remove a member from the netgroup:
ipa netgroup-remove-member --users=tuser2 admins
Display information about a netgroup:
ipa netgroup-show admins
Delete a netgroup:
ipa netgroup-del admins
""")
register = Registry()
@register()
class netgroup(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Netgroup name'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
label=_(u'NIS domain name'),
),
parameters.Str(
'ipauniqueid',
required=False,
label=_(u'IPA unique ID'),
doc=_(u'IPA unique ID'),
),
parameters.Str(
'usercategory',
required=False,
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
),
parameters.Str(
'member_netgroup',
required=False,
label=_(u'Member netgroups'),
),
parameters.Str(
'memberof_netgroup',
required=False,
label=_(u'Member of netgroups'),
),
parameters.Str(
'memberindirect_netgroup',
required=False,
label=_(u'Indirect Member netgroups'),
),
parameters.Str(
'memberuser_user',
required=False,
label=_(u'Member User'),
),
parameters.Str(
'memberuser_group',
required=False,
label=_(u'Member Group'),
),
parameters.Str(
'memberhost_host',
required=False,
label=_(u'Member Host'),
),
parameters.Str(
'memberhost_hostgroup',
required=False,
label=_(u'Member Hostgroup'),
),
)
@register()
class netgroup_add(Method):
__doc__ = _("Add a new netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
cli_name='nisdomain',
label=_(u'NIS domain name'),
),
parameters.Str(
'usercategory',
required=False,
cli_name='usercat',
cli_metavar="['all']",
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
cli_name='hostcat',
cli_metavar="['all']",
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
exclude=('cli', 'webui'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class netgroup_add_member(Method):
__doc__ = _("Add members to a netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'user',
required=False,
multivalue=True,
cli_name='users',
label=_(u'member user'),
doc=_(u'users to add'),
alwaysask=True,
),
parameters.Str(
'group',
required=False,
multivalue=True,
cli_name='groups',
label=_(u'member group'),
doc=_(u'groups to add'),
alwaysask=True,
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'member host'),
doc=_(u'hosts to add'),
alwaysask=True,
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'member host group'),
doc=_(u'host groups to add'),
alwaysask=True,
),
parameters.Str(
'netgroup',
required=False,
multivalue=True,
cli_name='netgroups',
label=_(u'member netgroup'),
doc=_(u'netgroups to add'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be added'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members added'),
),
)
@register()
class netgroup_del(Method):
__doc__ = _("Delete a netgroup.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class netgroup_find(Method):
__doc__ = _("Search for a netgroup.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
cli_name='nisdomain',
label=_(u'NIS domain name'),
),
parameters.Str(
'ipauniqueid',
required=False,
cli_name='uuid',
label=_(u'IPA unique ID'),
doc=_(u'IPA unique ID'),
),
parameters.Str(
'usercategory',
required=False,
cli_name='usercat',
cli_metavar="['all']",
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
cli_name='hostcat',
cli_metavar="['all']",
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
exclude=('cli', 'webui'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'private',
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'managed',
doc=_(u'search for managed groups'),
default=False,
default_from=DefaultFrom(lambda private: private),
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("name")'),
default=False,
autofill=True,
),
parameters.Str(
'netgroup',
required=False,
multivalue=True,
cli_name='netgroups',
label=_(u'netgroup'),
doc=_(u'Search for netgroups with these member netgroups.'),
),
parameters.Str(
'no_netgroup',
required=False,
multivalue=True,
cli_name='no_netgroups',
label=_(u'netgroup'),
doc=_(u'Search for netgroups without these member netgroups.'),
),
parameters.Str(
'user',
required=False,
multivalue=True,
cli_name='users',
label=_(u'user'),
doc=_(u'Search for netgroups with these member users.'),
),
parameters.Str(
'no_user',
required=False,
multivalue=True,
cli_name='no_users',
label=_(u'user'),
doc=_(u'Search for netgroups without these member users.'),
),
parameters.Str(
'group',
required=False,
multivalue=True,
cli_name='groups',
label=_(u'group'),
doc=_(u'Search for netgroups with these member groups.'),
),
parameters.Str(
'no_group',
required=False,
multivalue=True,
cli_name='no_groups',
label=_(u'group'),
doc=_(u'Search for netgroups without these member groups.'),
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'host'),
doc=_(u'Search for netgroups with these member hosts.'),
),
parameters.Str(
'no_host',
required=False,
multivalue=True,
cli_name='no_hosts',
label=_(u'host'),
doc=_(u'Search for netgroups without these member hosts.'),
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'host group'),
doc=_(u'Search for netgroups with these member host groups.'),
),
parameters.Str(
'no_hostgroup',
required=False,
multivalue=True,
cli_name='no_hostgroups',
label=_(u'host group'),
doc=_(u'Search for netgroups without these member host groups.'),
),
parameters.Str(
'in_netgroup',
required=False,
multivalue=True,
cli_name='in_netgroups',
label=_(u'netgroup'),
            doc=_(u'Search for netgroups with these member of netgroups.'),
        ),
        parameters.Str(
            'not_in_netgroup',
            required=False,
            multivalue=True,
            cli_name='not_in_netgroups',
            label=_(u'netgroup'),
            doc=_(u'Search for netgroups without these member of netgroups.'),
        ),
    )
    has_output = (
        output.Output(
            'summary',
            (unicode, type(None)),
            doc=_(u'User-friendly description of action performed'),
        ),
        output.ListOfEntries(
            'result',
        ),
        output.Output(
            'count',
            int,
            doc=_(u'Number of entries returned'),
        ),
        output.Output(
            'truncated',
            bool,
            doc=_(u'True if not all results were returned'),
        ),
    )


@register()
class netgroup_mod(Method):
    __doc__ = _("Modify a netgroup.")

    takes_args = (
        parameters.Str(
            'cn',
            cli_name='name',
            label=_(u'Netgroup name'),
            no_convert=True,
        ),
    )
    takes_options = (
        parameters.Str(
            'description',
            required=False,
            cli_name='desc',
            label=_(u'Description'),
            doc=_(u'Netgroup description'),
        ),
        parameters.Str(
            'nisdomainname',
            required=False,
            cli_name='nisdomain',
            label=_(u'NIS domain name'),
        ),
        parameters.Str(
            'usercategory',
            required=False,
            cli_name='usercat',
            cli_metavar="['all']",
            label=_(u'User category'),
            doc=_(u'User category the rule applies to'),
        ),
        parameters.Str(
            'hostcategory',
            required=False,
            cli_name='hostcat',
            cli_metavar="['all']",
            label=_(u'Host category'),
            doc=_(u'Host category the rule applies to'),
        ),
        parameters.Str(
            'externalhost',
            required=False,
            multivalue=True,
            label=_(u'External host'),
            exclude=('cli', 'webui'),
        ),
        parameters.Str(
            'setattr',
            required=False,
            multivalue=True,
            doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
            exclude=('webui',),
        ),
        parameters.Str(
            'addattr',
            required=False,
            multivalue=True,
            doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
            exclude=('webui',),
        ),
        parameters.Str(
            'delattr',
            required=False,
            multivalue=True,
            doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
            exclude=('webui',),
        ),
        parameters.Flag(
            'rights',
            label=_(u'Rights'),
            doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'all',
            doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
            exclude=('webui',),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'raw',
            doc=_(u'Print entries as stored on the server. Only affects output format.'),
            exclude=('webui',),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'no_members',
            doc=_(u'Suppress processing of membership attributes.'),
            exclude=('webui', 'cli'),
            default=False,
            autofill=True,
        ),
    )
    has_output = (
        output.Output(
            'summary',
            (unicode, type(None)),
            doc=_(u'User-friendly description of action performed'),
        ),
        output.Entry(
            'result',
        ),
        output.PrimaryKey(
            'value',
            doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
        ),
    )


@register()
class netgroup_remove_member(Method):
    __doc__ = _("Remove members from a netgroup.")

    takes_args = (
        parameters.Str(
            'cn',
            cli_name='name',
            label=_(u'Netgroup name'),
            no_convert=True,
        ),
    )
    takes_options = (
        parameters.Flag(
            'all',
            doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
            exclude=('webui',),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'raw',
            doc=_(u'Print entries as stored on the server. Only affects output format.'),
            exclude=('webui',),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'no_members',
            doc=_(u'Suppress processing of membership attributes.'),
            exclude=('webui', 'cli'),
            default=False,
            autofill=True,
        ),
        parameters.Str(
            'user',
            required=False,
            multivalue=True,
            cli_name='users',
            label=_(u'member user'),
            doc=_(u'users to remove'),
            alwaysask=True,
        ),
        parameters.Str(
            'group',
            required=False,
            multivalue=True,
            cli_name='groups',
            label=_(u'member group'),
            doc=_(u'groups to remove'),
            alwaysask=True,
        ),
        parameters.Str(
            'host',
            required=False,
            multivalue=True,
            cli_name='hosts',
            label=_(u'member host'),
            doc=_(u'hosts to remove'),
            alwaysask=True,
        ),
        parameters.Str(
            'hostgroup',
            required=False,
            multivalue=True,
            cli_name='hostgroups',
            label=_(u'member host group'),
            doc=_(u'host groups to remove'),
            alwaysask=True,
        ),
        parameters.Str(
            'netgroup',
            required=False,
            multivalue=True,
            cli_name='netgroups',
            label=_(u'member netgroup'),
            doc=_(u'netgroups to remove'),
            alwaysask=True,
        ),
    )
    has_output = (
        output.Entry(
            'result',
        ),
        output.Output(
            'failed',
            dict,
            doc=_(u'Members that could not be removed'),
        ),
        output.Output(
            'completed',
            int,
            doc=_(u'Number of members removed'),
        ),
    )


@register()
class netgroup_show(Method):
    __doc__ = _("Display information about a netgroup.")

    takes_args = (
        parameters.Str(
            'cn',
            cli_name='name',
            label=_(u'Netgroup name'),
            no_convert=True,
        ),
    )
    takes_options = (
        parameters.Flag(
            'rights',
            label=_(u'Rights'),
            doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'all',
            doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
            exclude=('webui',),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'raw',
            doc=_(u'Print entries as stored on the server. Only affects output format.'),
            exclude=('webui',),
            default=False,
            autofill=True,
        ),
        parameters.Flag(
            'no_members',
            doc=_(u'Suppress processing of membership attributes.'),
            exclude=('webui', 'cli'),
            default=False,
            autofill=True,
        ),
    )
    has_output = (
        output.Output(
            'summary',
            (unicode, type(None)),
            doc=_(u'User-friendly description of action performed'),
        ),
        output.Entry(
            'result',
        ),
        output.PrimaryKey(
            'value',
            doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
        ),
    )
| gpl-3.0 | 3,399,233,281,070,995,500 | 27.176879 | 162 | 0.473352 | false |
pjbull/mkdocs | mkdocs/commands/new.py | 30 | 1433 | # coding: utf-8
from __future__ import unicode_literals

import io
import logging
import os

config_text = 'site_name: My Docs\n'
index_text = """# Welcome to MkDocs

For full documentation visit [mkdocs.org](http://mkdocs.org).

## Commands

* `mkdocs new [dir-name]` - Create a new project.
* `mkdocs serve` - Start the live-reloading docs server.
* `mkdocs build` - Build the documentation site.
* `mkdocs help` - Print this help message.

## Project layout

    mkdocs.yml    # The configuration file.
    docs/
        index.md  # The documentation homepage.
        ...       # Other markdown pages, images and other files.
"""

log = logging.getLogger(__name__)


def new(output_dir):

    docs_dir = os.path.join(output_dir, 'docs')
    config_path = os.path.join(output_dir, 'mkdocs.yml')
    index_path = os.path.join(docs_dir, 'index.md')

    if os.path.exists(config_path):
        log.info('Project already exists.')
        return

    if not os.path.exists(output_dir):
        log.info('Creating project directory: %s', output_dir)
        os.mkdir(output_dir)

    log.info('Writing config file: %s', config_path)
    io.open(config_path, 'w', encoding='utf-8').write(config_text)

    if os.path.exists(index_path):
        return

    log.info('Writing initial docs: %s', index_path)
    if not os.path.exists(docs_dir):
        os.mkdir(docs_dir)
    io.open(index_path, 'w', encoding='utf-8').write(index_text)
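
# Editor's note (illustrative usage, not part of the original module):
# calling new('my-project') creates my-project/mkdocs.yml plus
# my-project/docs/index.md, and is a no-op if mkdocs.yml already exists.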
| bsd-2-clause | 2,044,111,099,377,121,500 | 25.537037 | 66 | 0.649686 | false |
jameskdev/lge-kernel-batman_skt | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads)
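
    # Editor's note (explanatory comment, not in the original file): the
    # event above samples every occurrence (sample_period = 1) and records
    # the PERIOD, TID and CPU of each sample; task = 1 and comm = 1 also
    # enable the fork/exit and comm sideband events printed below.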
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 | 606,358,078,985,005,200 | 31.634146 | 93 | 0.669656 | false |
damonkohler/sl4a | python/xmpppy/doc/examples/xtalk.py | 87 | 2951 | #!/usr/bin/python
# $Id: xtalk.py,v 1.4 2008/08/09 17:00:18 normanr Exp $
import sys,os,xmpp,time,select
class Bot:
    def __init__(self, jabber, remotejid):
        self.jabber = jabber
        self.remotejid = remotejid

    def register_handlers(self):
        self.jabber.RegisterHandler('message', self.xmpp_message)

    def xmpp_message(self, con, event):
        type = event.getType()
        fromjid = event.getFrom().getStripped()
        body = event.getBody()
        if type in ['message', 'chat', None] and fromjid == self.remotejid and body:
            sys.stdout.write(body + '\n')

    def stdio_message(self, message):
        m = xmpp.protocol.Message(to=self.remotejid, body=message, typ='chat')
        self.jabber.send(m)

    def xmpp_connect(self):
        con = self.jabber.connect()
        if not con:
            sys.stderr.write('could not connect!\n')
            return False
        sys.stderr.write('connected with %s\n' % con)
        auth = self.jabber.auth(jid.getNode(), jidparams['password'], resource=jid.getResource())
        if not auth:
            sys.stderr.write('could not authenticate!\n')
            return False
        sys.stderr.write('authenticated using %s\n' % auth)
        self.register_handlers()
        return con

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Syntax: xtalk JID"
        sys.exit(0)

    tojid = sys.argv[1]

    jidparams = {}
    if os.access(os.environ['HOME'] + '/.xtalk', os.R_OK):
        for ln in open(os.environ['HOME'] + '/.xtalk').readlines():
            if not ln[0] in ('#', ';'):
                key, val = ln.strip().split('=', 1)
                jidparams[key.lower()] = val
    for mandatory in ['jid', 'password']:
        if mandatory not in jidparams.keys():
            open(os.environ['HOME'] + '/.xtalk', 'w').write('#Uncomment fields before use and type in correct credentials.\n#[email protected]/resource (/resource is optional)\n#PASSWORD=juliet\n')
            print 'Please point ~/.xtalk config file to valid JID for sending messages.'
            sys.exit(0)

    jid = xmpp.protocol.JID(jidparams['jid'])

    cl = xmpp.Client(jid.getDomain())  #,debug=[])
    bot = Bot(cl, tojid)

    if not bot.xmpp_connect():
        sys.stderr.write("Could not connect to server, or password mismatch!\n")
        sys.exit(1)

    #cl.SendInitPresence(requestRoster=0)  # you may need to uncomment this for old server
    socketlist = {cl.Connection._sock: 'xmpp', sys.stdin: 'stdio'}
    online = 1

    while online:
        (i, o, e) = select.select(socketlist.keys(), [], [], 1)
        for each in i:
            if socketlist[each] == 'xmpp':
                cl.Process(1)
            elif socketlist[each] == 'stdio':
                msg = sys.stdin.readline().rstrip('\r\n')
                bot.stdio_message(msg)
            else:
                raise Exception("Unknown socket type: %s" % repr(socketlist[each]))
    #cl.disconnect()
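
# Editor's sketch of a filled-in ~/.xtalk, following the template the script
# writes above (credentials are made up):
#   [email protected]/resource
#   PASSWORD=juliet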
| apache-2.0 | 2,296,433,828,746,531,800 | 34.554217 | 199 | 0.582853 | false |
yesudeep/cmc | app/console/app/pygments/styles/native.py | 23 | 1917 | # -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: 2006-2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
"""
Pygments version of the "native" vim theme.
"""
background_color = '#202020'
highlight_color = '#404040'
styles = {
Token: '#d0d0d0',
Whitespace: '#666666',
Comment: 'italic #999999',
Comment.Preproc: 'noitalic bold #cd2828',
Comment.Special: 'noitalic bold #e50808 bg:#520000',
Keyword: 'bold #6ab825',
Keyword.Pseudo: 'nobold',
Operator.Word: 'bold #6ab825',
String: '#ed9d13',
String.Other: '#ffa500',
Number: '#3677a9',
Name.Builtin: '#24909d',
Name.Variable: '#40ffff',
Name.Constant: '#40ffff',
Name.Class: 'underline #447fcf',
Name.Function: '#447fcf',
Name.Namespace: 'underline #447fcf',
Name.Exception: '#bbbbbb',
Name.Tag: 'bold #6ab825',
Name.Attribute: '#bbbbbb',
Name.Decorator: '#ffa500',
Generic.Heading: 'bold #ffffff',
Generic.Subheading: 'underline #ffffff',
Generic.Deleted: '#d22323',
Generic.Inserted: '#589819',
Generic.Error: '#d22323',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#aaaaaa',
Generic.Output: '#cccccc',
Generic.Traceback: '#d22323',
Error: 'bg:#e3d2d2 #a61717'
}
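
# Editor's sketch of standard Pygments usage with this style (not part of the
# original file; assumes the stock Pygments API):
# from pygments import highlight
# from pygments.lexers import PythonLexer
# from pygments.formatters import HtmlFormatter
# print(highlight('print 1', PythonLexer(), HtmlFormatter(style=NativeStyle)))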
| mit | 9,144,831,126,393,611,000 | 28.492308 | 67 | 0.51591 | false |
zzicewind/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 1891 | 3300 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    return defaultdict(autodict)

flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    string = ""

    if symbolic_fields[event_name][field_name]:
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string

trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }

def trace_flag_str(value):
    string = ""
    print_delim = 0

    keys = trace_flags.keys()

    for idx in keys:
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx

    return string
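
# Worked example (editor's note, not in the original file):
# trace_flag_str(0x11) returns "IRQS_OFF | SOFTIRQ", since
# 0x11 == 0x01 (IRQS_OFF) | 0x10 (SOFTIRQ).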

def taskState(state):
    states = {
        0 : "R",
        1 : "S",
        2 : "D",
        64: "DEAD"
    }

    if state not in states:
        return "Unknown"

    return states[state]


class EventHeaders:
    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm, common_callchain):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
        self.callchain = common_callchain

    def ts(self):
        return (self.secs * (10 ** 9)) + self.nsecs

    def ts_format(self):
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 | 4,907,189,462,197,784,000 | 26.04918 | 86 | 0.598182 | false |
mindpin/mindpin_oppia | core/controllers/reader.py | 1 | 11151 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia reader view."""
__author__ = 'Sean Lip'
from core.controllers import base
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import skins_services
from core.domain import stats_services
from core.domain import widget_registry
import feconf
import jinja_utils
import jinja2
def require_viewer(handler):
    """Decorator that checks if the user can view the given exploration."""
    def test_can_view(self, exploration_id, **kwargs):
        """Checks if the user for the current session is logged in."""
        if rights_manager.Actor(self.user_id).can_view(exploration_id):
            return handler(self, exploration_id, **kwargs)
        else:
            raise self.PageNotFoundException

    return test_can_view


class ExplorationPage(base.BaseHandler):
    """Page describing a single exploration."""

    @require_viewer
    def get(self, exploration_id):
        """Handles GET requests."""
        version = self.request.get('v')
        if not version:
            # The default value for a missing parameter seems to be ''.
            version = None

        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=version)
        except Exception as e:
            raise self.PageNotFoundException(e)

        if not rights_manager.Actor(self.user_id).can_view(exploration_id):
            raise self.PageNotFoundException

        is_iframed = (self.request.get('iframed') == 'true')

        # TODO(sll): Cache these computations.
        interactive_widget_ids = exploration.get_interactive_widget_ids()
        widget_js_directives = (
            widget_registry.Registry.get_noninteractive_widget_js() +
            widget_registry.Registry.get_interactive_widget_js(
                interactive_widget_ids))

        self.values.update({
            'content': skins_services.get_skin_html(exploration.default_skin),
            'exploration_version': version,
            'iframed': is_iframed,
            'is_private': rights_manager.is_exploration_private(exploration_id),
            'nav_mode': feconf.NAV_MODE_EXPLORE,
            'widget_js_directives': jinja2.utils.Markup(widget_js_directives),
        })

        if is_iframed:
            self.render_template(
                'reader/reader_exploration.html', iframe_restriction=None)
        else:
            self.render_template('reader/reader_exploration.html')


class ExplorationHandler(base.BaseHandler):
    """Provides the initial data for a single exploration."""

    def get(self, exploration_id):
        """Populates the data on the individual exploration page."""
        # TODO(sll): Maybe this should send a complete state machine to the
        # frontend, and all interaction would happen client-side?
        version = self.request.get('v')
        if not version:
            version = None

        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=version)
        except Exception as e:
            raise self.PageNotFoundException(e)

        init_params = exploration.get_init_params()
        reader_params = exploration.update_with_state_params(
            exploration.init_state_name, init_params)

        init_state = exploration.init_state
        interactive_widget = widget_registry.Registry.get_widget_by_id(
            feconf.INTERACTIVE_PREFIX, init_state.widget.widget_id)
        interactive_html = interactive_widget.get_interactive_widget_tag(
            init_state.widget.customization_args, reader_params)

        self.values.update({
            'block_number': 0,
            'init_html': init_state.content[0].to_html(reader_params),
            'interactive_html': interactive_html,
            'params': reader_params,
            'state_history': [exploration.init_state_name],
            'state_name': exploration.init_state_name,
            'title': exploration.title,
        })
        self.render_json(self.values)

        stats_services.EventHandler.record_state_hit(
            exploration_id, exploration.init_state_name, True)


class FeedbackHandler(base.BaseHandler):
    """Handles feedback to readers."""

    REQUIRE_PAYLOAD_CSRF_CHECK = False

    def _append_answer_to_stats_log(
            self, old_state, answer, exploration_id, old_state_name,
            old_params, handler, rule):
        """Append the reader's answer to the statistics log."""
        widget = widget_registry.Registry.get_widget_by_id(
            feconf.INTERACTIVE_PREFIX, old_state.widget.widget_id)

        recorded_answer = widget.get_stats_log_html(
            old_state.widget.customization_args, old_params, answer)

        stats_services.EventHandler.record_answer_submitted(
            exploration_id, old_state_name, handler, str(rule),
            recorded_answer)

    def _append_content(self, exploration, sticky, finished, old_params,
                        new_state, new_state_name, state_has_changed,
                        html_output):
        """Appends content for the new state to the output variables."""
        if finished:
            return {}, html_output, ''
        else:
            # Populate new parameters.
            new_params = exploration.update_with_state_params(
                new_state_name, old_params)

            if state_has_changed:
                # Append the content for the new state.
                state_html = exploration.states[
                    new_state_name].content[0].to_html(new_params)

                if html_output and state_html:
                    html_output += '<br>'

                html_output += state_html

            interactive_html = (
                '' if sticky else
                widget_registry.Registry.get_widget_by_id(
                    feconf.INTERACTIVE_PREFIX, new_state.widget.widget_id
                ).get_interactive_widget_tag(
                    new_state.widget.customization_args, new_params)
            )

            return (new_params, html_output, interactive_html)

    @require_viewer
    def post(self, exploration_id, escaped_state_name):
        """Handles feedback interactions with readers."""
        old_state_name = self.unescape_state_name(escaped_state_name)
        # The reader's answer.
        answer = self.payload.get('answer')
        # The answer handler (submit, click, etc.)
        handler = self.payload.get('handler')
        # The 0-based index of the last content block already on the page.
        block_number = self.payload.get('block_number') + 1
        # Parameters associated with the reader.
        old_params = self.payload.get('params', {})
        old_params['answer'] = answer
        # The reader's state history.
        state_history = self.payload['state_history']
        # The version of the exploration.
        version = self.payload.get('version')

        values = {}

        exploration = exp_services.get_exploration_by_id(
            exploration_id, version=version)
        old_state = exploration.states[old_state_name]

        rule = exploration.classify(
            old_state_name, handler, answer, old_params)
        feedback = rule.get_feedback_string()
        new_state_name = rule.dest
        new_state = (
            None if new_state_name == feconf.END_DEST
            else exploration.states[new_state_name])

        stats_services.EventHandler.record_state_hit(
            exploration_id, new_state_name,
            (new_state_name not in state_history))
        state_history.append(new_state_name)

        # If the new state widget is the same as the old state widget, and the
        # new state widget is sticky, do not render the reader response. The
        # interactive widget in the frontend should take care of this.
        # TODO(sll): This special-casing is not great; we should
        # make the interface for updating the frontend more generic so that
        # all the updates happen in the same place. Perhaps in the non-sticky
        # case we should call a frontend method named appendFeedback() or
        # similar.
        sticky = (
            new_state_name != feconf.END_DEST and
            new_state.widget.sticky and
            new_state.widget.widget_id == old_state.widget.widget_id
        )

        self._append_answer_to_stats_log(
            old_state, answer, exploration_id, old_state_name, old_params,
            handler, rule)

        # Append the reader's answer to the response HTML.
        old_widget = widget_registry.Registry.get_widget_by_id(
            feconf.INTERACTIVE_PREFIX, old_state.widget.widget_id)
        reader_response_html = old_widget.get_reader_response_html(
            old_state.widget.customization_args, old_params, answer, sticky)
        values['reader_response_html'] = reader_response_html

        # Add Oppia's feedback to the response HTML.
        html_output = '<div>%s</div>' % jinja_utils.parse_string(
            feedback, old_params)

        # Add the content for the new state to the response HTML.
        finished = (new_state_name == feconf.END_DEST)
        state_has_changed = (old_state_name != new_state_name)
        new_params, html_output, interactive_html = (
            self._append_content(
                exploration, sticky, finished, old_params, new_state,
                new_state_name, state_has_changed, html_output))

        values.update({
            'interactive_html': interactive_html,
            'exploration_id': exploration_id,
            'state_name': new_state_name,
            'oppia_html': html_output,
            'block_number': block_number,
            'params': new_params,
            'finished': finished,
            'state_history': state_history,
        })

        self.render_json(values)
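
    # Editor's note: an illustrative POST payload for the handler above
    # (field names taken from the self.payload.get() calls; the values are
    # made up): {"answer": "42", "handler": "submit", "block_number": 0,
    # "params": {}, "state_history": ["Welcome!"], "version": 1}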

class ReaderFeedbackHandler(base.BaseHandler):
    """Submits feedback from the reader."""

    REQUIRE_PAYLOAD_CSRF_CHECK = False

    @require_viewer
    def post(self, exploration_id, escaped_state_name):
        """Handles POST requests."""
        state_name = self.unescape_state_name(escaped_state_name)

        feedback = self.payload.get('feedback')
        state_history = self.payload.get('state_history')
        version = self.payload.get('version')
        # TODO(sll): Add the reader's history log here.
        stats_services.EventHandler.record_state_feedback_from_reader(
            exploration_id, state_name, feedback,
            {'state_history': state_history})
| apache-2.0 | 2,130,386,595,372,999,000 | 38.402827 | 80 | 0.62676 | false |
madan96/sympy | sympy/printing/rcode.py | 7 | 14467 | """
R code printer
The RCodePrinter converts single sympy expressions into single R expressions,
using the functions defined in math.h where possible.
"""
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import string_types, range
from sympy.codegen.ast import Assignment
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from sympy.sets.fancysets import Range
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in RCodePrinter._print_Function(self)
known_functions = {
    #"Abs": [(lambda x: not x.is_integer, "fabs")],
    "Abs": "abs",
    "gamma": "gamma",
    "sin": "sin",
    "cos": "cos",
    "tan": "tan",
    "asin": "asin",
    "acos": "acos",
    "atan": "atan",
    "atan2": "atan2",
    "exp": "exp",
    "log": "log",
    "erf": "erf",
    "sinh": "sinh",
    "cosh": "cosh",
    "tanh": "tanh",
    "asinh": "asinh",
    "acosh": "acosh",
    "atanh": "atanh",
    "floor": "floor",
    "ceiling": "ceiling",
    "sign": "sign",
}

# These are the core reserved words in the R language. Taken from:
# https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Reserved-words

reserved_words = ['if',
                  'else',
                  'repeat',
                  'while',
                  'function',
                  'for',
                  'in',
                  'next',
                  'break',
                  'TRUE',
                  'FALSE',
                  'NULL',
                  'Inf',
                  'NaN',
                  'NA',
                  'NA_integer_',
                  'NA_real_',
                  'NA_complex_',
                  'NA_character_',
                  'volatile']


class RCodePrinter(CodePrinter):
    """A printer to convert python expressions to strings of R code"""
    printmethod = "_rcode"
    language = "R"

    _default_settings = {
        'order': None,
        'full_prec': 'auto',
        'precision': 15,
        'user_functions': {},
        'human': True,
        'contract': True,
        'dereference': set(),
        'error_on_reserved': False,
        'reserved_word_suffix': '_',
    }
    _operators = {
        'and': '&',
        'or': '|',
    }

    _relationals = {
    }

    def __init__(self, settings={}):
        CodePrinter.__init__(self, settings)
        self.known_functions = dict(known_functions)
        userfuncs = settings.get('user_functions', {})
        self.known_functions.update(userfuncs)
        self._dereference = set(settings.get('dereference', []))
        self.reserved_words = set(reserved_words)

    def _rate_index_position(self, p):
        return p*5

    def _get_statement(self, codestring):
        return "%s;" % codestring

    def _get_comment(self, text):
        return "// {0}".format(text)

    def _declare_number_const(self, name, value):
        return "{0} = {1};".format(name, value)

    def _format_code(self, lines):
        return self.indent_code(lines)

    def _traverse_matrix_indices(self, mat):
        rows, cols = mat.shape
        return ((i, j) for i in range(rows) for j in range(cols))

    def _get_loop_opening_ending(self, indices):
        """Returns a tuple (open_lines, close_lines) containing lists of codelines
        """
        open_lines = []
        close_lines = []
        loopstart = "for (%(var)s in %(start)s:%(end)s){"
        for i in indices:
            # R arrays start at 1 and end at dimension
            open_lines.append(loopstart % {
                'var': self._print(i.label),
                'start': self._print(i.lower+1),
                'end': self._print(i.upper + 1)})
            close_lines.append("}")
        return open_lines, close_lines

    def _print_Pow(self, expr):
        if "Pow" in self.known_functions:
            return self._print_Function(expr)
        PREC = precedence(expr)
        if expr.exp == -1:
            return '1.0/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            return 'sqrt(%s)' % self._print(expr.base)
        else:
            return '%s^%s' % (self.parenthesize(expr.base, PREC),
                              self.parenthesize(expr.exp, PREC))

    def _print_Rational(self, expr):
        p, q = int(expr.p), int(expr.q)
        return '%d.0/%d.0' % (p, q)

    def _print_Indexed(self, expr):
        inds = [ self._print(i) for i in expr.indices ]
        return "%s[%s]" % (self._print(expr.base.label), ", ".join(inds))

    def _print_Idx(self, expr):
        return self._print(expr.label)

    def _print_Exp1(self, expr):
        return "exp(1)"

    def _print_Pi(self, expr):
        return 'pi'

    def _print_Infinity(self, expr):
        return 'Inf'

    def _print_NegativeInfinity(self, expr):
        return '-Inf'

    def _print_Assignment(self, expr):
        from sympy.functions.elementary.piecewise import Piecewise
        from sympy.matrices.expressions.matexpr import MatrixSymbol
        from sympy.tensor.indexed import IndexedBase
        lhs = expr.lhs
        rhs = expr.rhs
        # We special case assignments that take multiple lines
        #if isinstance(expr.rhs, Piecewise):
        #    # Here we modify Piecewise so each expression is now
        #    # an Assignment, and then continue on the print.
        #    expressions = []
        #    conditions = []
        #    for (e, c) in rhs.args:
        #        expressions.append(Assignment(lhs, e))
        #        conditions.append(c)
        #    temp = Piecewise(*zip(expressions, conditions))
        #    return self._print(temp)
        #elif isinstance(lhs, MatrixSymbol):
        if isinstance(lhs, MatrixSymbol):
            # Here we form an Assignment for each element in the array,
            # printing each one.
            lines = []
            for (i, j) in self._traverse_matrix_indices(lhs):
                temp = Assignment(lhs[i, j], rhs[i, j])
                code0 = self._print(temp)
                lines.append(code0)
            return "\n".join(lines)
        elif self._settings["contract"] and (lhs.has(IndexedBase) or
                rhs.has(IndexedBase)):
            # Here we check if there is looping to be done, and if so
            # print the required loops.
            return self._doprint_loops(rhs, lhs)
        else:
            lhs_code = self._print(lhs)
            rhs_code = self._print(rhs)
            return self._get_statement("%s = %s" % (lhs_code, rhs_code))

    def _print_Piecewise(self, expr):
        # This method is called only for inline if constructs
        # Top level piecewise is handled in doprint()
        if expr.args[-1].cond == True:
            last_line = "%s" % self._print(expr.args[-1].expr)
        else:
            last_line = "ifelse(%s,%s,NA)" % (self._print(expr.args[-1].cond), self._print(expr.args[-1].expr))
        code = last_line
        for e, c in reversed(expr.args[:-1]):
            code = "ifelse(%s,%s," % (self._print(c), self._print(e)) + code + ")"
        return(code)
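
    # Worked example (editor's note): for Piecewise((x + 1, x > 0), (x, True))
    # the loop above nests ifelse() calls and yields "ifelse(x > 0,x + 1,x)",
    # matching the doctest in rcode() below.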

    def _print_ITE(self, expr):
        from sympy.functions import Piecewise
        _piecewise = Piecewise((expr.args[1], expr.args[0]), (expr.args[2], True))
        return self._print(_piecewise)

    def _print_MatrixElement(self, expr):
        return "{0}[{1}]".format(expr.parent, expr.j +
                expr.i*expr.parent.shape[1])

    def _print_Symbol(self, expr):
        name = super(RCodePrinter, self)._print_Symbol(expr)
        if expr in self._dereference:
            return '(*{0})'.format(name)
        else:
            return name

    def _print_Relational(self, expr):
        lhs_code = self._print(expr.lhs)
        rhs_code = self._print(expr.rhs)
        op = expr.rel_op
        return ("{0} {1} {2}").format(lhs_code, op, rhs_code)

    def _print_sinc(self, expr):
        from sympy.functions.elementary.trigonometric import sin
        from sympy.core.relational import Ne
        from sympy.functions import Piecewise
        _piecewise = Piecewise(
            (sin(expr.args[0]) / expr.args[0], Ne(expr.args[0], 0)), (1, True))
        return self._print(_piecewise)

    def _print_AugmentedAssignment(self, expr):
        lhs_code = self._print(expr.lhs)
        op = expr.rel_op
        rhs_code = self._print(expr.rhs)
        return "{0} {1} {2};".format(lhs_code, op, rhs_code)

    def _print_For(self, expr):
        target = self._print(expr.target)
        if isinstance(expr.iterable, Range):
            start, stop, step = expr.iterable.args
        else:
            raise NotImplementedError("Only iterable currently supported is Range")
        body = self._print(expr.body)
        return ('for ({target} = {start}; {target} < {stop}; {target} += '
                '{step}) {{\n{body}\n}}').format(target=target, start=start,
                stop=stop, step=step, body=body)

    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""

        if isinstance(code, string_types):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)

        tab = " "
        inc_token = ('{', '(', '{\n', '(\n')
        dec_token = ('}', ')')

        code = [ line.lstrip(' \t') for line in code ]

        increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
        decrease = [ int(any(map(line.startswith, dec_token)))
                     for line in code ]

        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty

def rcode(expr, assign_to=None, **settings):
    """Converts an expr to a string of R code

    Parameters
    ==========

    expr : Expr
        A sympy expression to be converted.
    assign_to : optional
        When given, the argument is used as the name of the variable to which
        the expression is assigned. Can be a string, ``Symbol``,
        ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
        line-wrapping, or for expressions that generate multi-line statements.
    precision : integer, optional
        The precision for numbers such as pi [default=15].
    user_functions : dict, optional
        A dictionary where the keys are string representations of either
        ``FunctionClass`` or ``UndefinedFunction`` instances and the values
        are their desired R string representations. Alternatively, the
        dictionary value can be a list of tuples i.e. [(argument_test,
        rfunction_string)] or [(argument_test, rfunction_formater)]. See below
        for examples.
    human : bool, optional
        If True, the result is a single string that may contain some constant
        declarations for the number symbols. If False, the same information is
        returned in a tuple of (symbols_to_declare, not_supported_functions,
        code_text). [default=True].
    contract: bool, optional
        If True, ``Indexed`` instances are assumed to obey tensor contraction
        rules and the corresponding nested loops over indices are generated.
        Setting contract=False will not generate loops, instead the user is
        responsible to provide values for the indices in the code.
        [default=True].

    Examples
    ========

    >>> from sympy import rcode, symbols, Rational, sin, ceiling, Abs, Function
    >>> x, tau = symbols("x, tau")
    >>> rcode((2*tau)**Rational(7, 2))
    '8*sqrt(2)*tau^(7.0/2.0)'
    >>> rcode(sin(x), assign_to="s")
    's = sin(x);'

    Simple custom printing can be defined for certain types by passing a
    dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
    Alternatively, the dictionary value can be a list of tuples i.e.
    [(argument_test, cfunction_string)].

    >>> custom_functions = {
    ...   "ceiling": "CEIL",
    ...   "Abs": [(lambda x: not x.is_integer, "fabs"),
    ...           (lambda x: x.is_integer, "ABS")],
    ...   "func": "f"
    ... }
    >>> func = Function('func')
    >>> rcode(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
    'f(fabs(x) + CEIL(x))'

    or if the R-function takes a subset of the original arguments:

    >>> rcode(2**x + 3**x, user_functions={'Pow': [
    ...   (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e),
    ...   (lambda b, e: b != 2, 'pow')]})
    'exp2(x) + pow(3, x)'

    ``Piecewise`` expressions are converted into conditionals. If an
    ``assign_to`` variable is provided an if statement is created, otherwise
    the ternary operator is used. Note that if the ``Piecewise`` lacks a
    default term, represented by ``(expr, True)`` then an error will be thrown.
    This is to prevent generating an expression that may not evaluate to
    anything.

    >>> from sympy import Piecewise
    >>> expr = Piecewise((x + 1, x > 0), (x, True))
    >>> print(rcode(expr, assign_to=tau))
    tau = ifelse(x > 0,x + 1,x);

    Support for loops is provided through ``Indexed`` types. With
    ``contract=True`` these expressions will be turned into loops, whereas
    ``contract=False`` will just print the assignment expression that should be
    looped over:

    >>> from sympy import Eq, IndexedBase, Idx
    >>> len_y = 5
    >>> y = IndexedBase('y', shape=(len_y,))
    >>> t = IndexedBase('t', shape=(len_y,))
    >>> Dy = IndexedBase('Dy', shape=(len_y-1,))
    >>> i = Idx('i', len_y-1)
    >>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
    >>> rcode(e.rhs, assign_to=e.lhs, contract=False)
    'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'

    Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
    must be provided to ``assign_to``. Note that any expression that can be
    generated normally can also exist inside a Matrix:

    >>> from sympy import Matrix, MatrixSymbol
    >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
    >>> A = MatrixSymbol('A', 3, 1)
    >>> print(rcode(mat, A))
    A[0] = x^2;
    A[1] = ifelse(x > 0,x + 1,x);
    A[2] = sin(x);
    """

    return RCodePrinter(settings).doprint(expr, assign_to)


def print_rcode(expr, **settings):
    """Prints R representation of the given expression."""
    print(rcode(expr, **settings))
| bsd-3-clause | -3,705,948,427,523,972,600 | 34.114078 | 111 | 0.564941 | false |
jss-emr/openerp-7-src | openerp/addons/l10n_br/__init__.py | 430 | 1403 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
| agpl-3.0 | 2,489,140,001,022,693,400 | 65.809524 | 79 | 0.391304 | false |
siconos/siconos-deb | wrap/doxy2swig.py | 1 | 17516 | #!/usr/bin/env python
"""Doxygen XML to SWIG docstring converter.
Usage:
doxy2swig.py [options] input.xml output.i
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
"""
######################################################################
#
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
#
# Thanks:
# Johan Hake: the include_function_definition feature
# Bill Spotz: bug reports and testing.
# Sebastian Henschel: Misc. enhancements.
#
######################################################################
import shlex
from xml.dom import minidom
import re
import textwrap
import sys
import types
import os.path
import optparse
def my_open_read(source):
    if hasattr(source, "read"):
        return source
    else:
        return open(source)

def my_open_write(dest):
    if hasattr(dest, "write"):
        return dest
    else:
        return open(dest, 'wb')


class Doxy2SWIG:
    """Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x that have support for
    feature("docstring").  Once the data is parsed it is stored in
    self.pieces.

    """

    def __init__(self, src, include_function_definition=True, quiet=False):
        """Initialize the instance given a source object.  `src` can
        be a file or filename.  If you do not want to include function
        definitions from doxygen then set
        `include_function_definition` to `False`.  This is handy since
        this allows you to use the swig generated function definition
        using %feature("autodoc", [0,1]).

        """
        f = my_open_read(src)
        self.src = src
        self.my_dir = os.path.dirname(f.name)
        self.xmldoc = minidom.parse(f).documentElement
        f.close()

        self.pieces = []
        self.pieces.append('\n// File: %s\n'%\
                           os.path.basename(f.name))

        self.space_re = re.compile(r'\s+')
        self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
        self.multi = 0
        self.ignores = ['inheritancegraph', 'param', 'listofallmembers',
                        'innerclass', 'name', 'declname', 'incdepgraph',
                        'invincdepgraph', 'programlisting', 'type',
                        'references', 'referencedby', 'location',
                        'collaborationgraph', 'reimplements',
                        'reimplementedby', 'derivedcompoundref',
                        'basecompoundref']
        #self.generics = []
        self.include_function_definition = include_function_definition
        if not include_function_definition:
            self.ignores.append('argsstring')

        self.quiet = quiet

    def generate(self):
        """Parses the file set in the initialization.  The resulting
        data is stored in `self.pieces`.

        """
        self.parse(self.xmldoc)

    def parse(self, node):
        """Parse a given node.  This function in turn calls the
        `parse_<nodeType>` functions which handle the respective
        nodes.

        """
        pm = getattr(self, "parse_%s"%node.__class__.__name__)
        pm(node)

    def parse_Document(self, node):
        self.parse(node.documentElement)

    def parse_Text(self, node):
        # replacements for swig docstrings and processed xml
        txt = node.data

        # txt = txt.replace('SP::', '')
        # txt = txt.replace('SA::', '')
        # txt = txt.replace('SPC::', '')
        # txt = txt.replace('std::', '')
        # txt = txt.replace('std11::', '')
        # txt = txt.replace('boost::', '')
        # txt = txt.replace('boost::', '')

        # processed xml update
        node.data = txt

        # replacements for swig docstrings only
        txt = txt.replace('\\', r'\\\\')
        txt = txt.replace('"', r'\"')

        # ignore pure whitespace
        m = self.space_re.match(txt)
        if m and len(m.group()) == len(txt):
            pass
        else:
            self.add_text(textwrap.fill(txt, break_long_words=False))

    def parse_Element(self, node):
        """Parse an `ELEMENT_NODE`.  This calls specific
        `do_<tagName>` handers for different elements.  If no handler
        is available the `generic_parse` method is called.  All
        tagNames specified in `self.ignores` are simply ignored.

        """
        name = node.tagName
        ignores = self.ignores
        if name in ignores:
            return
        attr = "do_%s" % name
        if hasattr(self, attr):
            handlerMethod = getattr(self, attr)
            handlerMethod(node)
        else:
            self.generic_parse(node)
            #if name not in self.generics: self.generics.append(name)

    def parse_Comment(self, node):
        """Parse a `COMMENT_NODE`.  This does nothing for now."""
        return

    def add_text(self, value):
        """Adds text corresponding to `value` into `self.pieces`."""
        if hasattr(value, '__iter__'):
            self.pieces.extend(value)
        else:
            self.pieces.append(value)

    def get_specific_nodes(self, node, names):
        """Given a node and a sequence of strings in `names`, return a
        dictionary containing the names as keys and child
        `ELEMENT_NODEs`, that have a `tagName` equal to the name.

        """
        nodes = [(x.tagName, x) for x in node.childNodes \
                 if x.nodeType == x.ELEMENT_NODE and \
                 x.tagName in names]
        return dict(nodes)

    def generic_parse(self, node, pad=0):
        """A Generic parser for arbitrary tags in a node.

        Parameters:

         - node: A node in the DOM.
         - pad: `int` (default: 0)

           If 0 the node data is not padded with newlines.  If 1 it
           appends a newline after parsing the childNodes.  If 2 it
           pads before and after the nodes are processed.  Defaults to
           0.

        """
        npiece = 0
        if pad:
            npiece = len(self.pieces)
            if pad == 2:
                self.add_text('\n')
        for n in node.childNodes:
            self.parse(n)
        if pad:
            if len(self.pieces) > npiece:
                self.add_text('\n')

    def space_parse(self, node):
        self.add_text(' ')
        self.generic_parse(node)

    do_ref = space_parse
    do_emphasis = space_parse
    do_bold = space_parse
    do_computeroutput = space_parse

    def do_formula(self, node):
        self.add_text(' ')
        data = '{0}'.format(node.firstChild.data).strip().\
            replace('\\', r'\\\\').\
            replace('"', r'\"').replace('$', '').strip()
        if len(data) <= 20:
            self.add_text(' :math:`{0}` '.format(data))
        else:
            self.add_text("""
.. math::
    :nowrap:

    {0}

""".format(data))

    def do_compoundname(self, node):
        self.add_text('\n\n')
        data = node.firstChild.data
        self.add_text('%%feature("docstring") %s "\n'%data)

    def do_compounddef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('class', 'struct'):
            prot = node.attributes['prot'].value
            if prot != 'public':
                return
            names = ('compoundname', 'briefdescription',
                     'detaileddescription', 'includes')
            first = self.get_specific_nodes(node, names)
            for n in names:
                if n in first:
                    self.parse(first[n])
            self.add_text(['";','\n'])
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
        elif kind in ('file', 'namespace'):
            nodes = node.getElementsByTagName('sectiondef')
            for n in nodes:
                self.parse(n)

    def do_includes(self, node):
        pass
        # self.add_text('C++ includes: ')
        # self.generic_parse(node, pad=1)

    def do_parameterlist(self, node):
        text = 'unknown'
        for key, val in node.attributes.items():
            if key == 'kind':
                if val == 'param': text = 'Parameters'
                elif val == 'exception': text = 'Exceptions'
                else: text = val
                break
        self.add_text(['\n', '\n', text, ':', '\n'])
        self.generic_parse(node, pad=1)

    def do_para(self, node):
        self.add_text('\n')
        self.generic_parse(node, pad=1)

    def do_parametername(self, node):
        self.add_text('\n')
        try:
            data = node.firstChild.data
        except AttributeError: # perhaps a <ref> tag in it
            if hasattr(node, 'firstChild'):
                if hasattr(node.firstChild, 'firstChild'):
                    data = node.firstChild.firstChild.data
                else:
                    return
            else:
                return
        if data.find('Exception') != -1:
            self.add_text(data)
        else:
            self.add_text("%s: "%data)

    def do_parameterdefinition(self, node):
        self.generic_parse(node, pad=1)

    def do_detaileddescription(self, node):
        self.generic_parse(node, pad=1)

    def do_briefdescription(self, node):
        self.generic_parse(node, pad=1)

    def do_memberdef(self, node):
        prot = node.attributes['prot'].value
        id = node.attributes['id'].value
        kind = node.attributes['kind'].value
        tmp = node.parentNode.parentNode.parentNode
        compdef = tmp.getElementsByTagName('compounddef')[0]
        cdef_kind = compdef.attributes['kind'].value

        if prot == 'public':
            first = self.get_specific_nodes(node, ('definition', 'name'))
            name = first['name'].firstChild.data
            if name[:8] == 'operator': # Don't handle operators yet.
                return

            #if 'definition ' not in first or \
            #       kind in ['variable', 'typedef']:
            #    return

            if self.include_function_definition:
                defn = first['definition'].firstChild.data
                # remove return type information
                defn = '.'.join(shlex.split(defn)[-1].split('::'))
                first['definition'].firstChild.data = defn
            else:
                defn = ""
            self.add_text('\n')
            self.add_text('%feature("docstring") ')

            anc = node.parentNode.parentNode
            if cdef_kind in ('file', 'namespace'):
                ns_node = anc.getElementsByTagName('innernamespace')
                if not ns_node and cdef_kind == 'namespace':
                    ns_node = anc.getElementsByTagName('compoundname')
                if ns_node:
                    ns = ns_node[0].firstChild.data
                    self.add_text(' %s::%s "\n%s'%(ns, name, defn))
                else:
                    self.add_text(' %s "\n%s'%(name, defn))
            elif cdef_kind in ('class', 'struct'):
                # Get the full function name.
                anc_node = anc.getElementsByTagName('compoundname')
                cname = anc_node[0].firstChild.data
                self.add_text(' %s::%s "\n%s'%(cname, name, defn))

            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
            self.add_text(['";', '\n'])

    def do_definition(self, node):
        data = node.firstChild.data
        self.add_text('%s "\n%s'%(data, data))

    def do_sectiondef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('public-func', 'func', 'user-defined', ''):
            self.generic_parse(node)

    def do_header(self, node):
        """For a user defined section def a header field is present
        which should not be printed as such, so we comment it in the
        output."""
        data = node.firstChild.data
        self.add_text('\n/*\n %s \n*/\n'%data)
        # If our immediate sibling is a 'description' node then we
        # should comment that out also and remove it from the parent
        # node's children.
        parent = node.parentNode
        idx = parent.childNodes.index(node)
        if len(parent.childNodes) >= idx + 2:
            nd = parent.childNodes[idx+2]
            if nd.nodeName == 'description':
                nd = parent.removeChild(nd)
                self.add_text('\n/*')
                self.generic_parse(nd)
                self.add_text('\n*/\n')

    def do_simplesect(self, node):
        kind = node.attributes['kind'].value
        if kind in ('date', 'rcs', 'version'):
            pass
        elif kind == 'warning':
            self.add_text(['\n', 'WARNING: '])
            self.generic_parse(node)
        elif kind == 'see':
            self.add_text('\n')
            self.add_text('See: ')
            self.generic_parse(node)
        else:
            self.generic_parse(node)

    def do_argsstring(self, node):
        pass
        # args = node.firstChild.data
        # remove chars after closing parent
        # a1 = args.split(')')[0]
        # a2 = [ shlex.split(a) for a in a1.split('(')[1].split(',') ]
        # a3 = []
        # for l in a2:
        #     if len(l) > 0:
        #         a3 += [ l[-1].strip('&*').replace('false','False').replace('true','True') ]
        #     else:
        #         a3 += [ '' ]
        # a4 = ', '.join(a3)
        # node.firstChild.data = a4
        # self.add_text('({0})'.format(a4))

    def do_member(self, node):
        kind = node.attributes['kind'].value
        refid = node.attributes['refid'].value
        if kind == 'function' and refid[:9] == 'namespace':
            self.generic_parse(node)

    def do_doxygenindex(self, node):
        self.multi = 1
        comps = node.getElementsByTagName('compound')
        for c in comps:
            refid = c.attributes['refid'].value
            fname = refid + '.xml'
            if not os.path.exists(fname):
                fname = os.path.join(self.my_dir, fname)
            if not self.quiet:
                print("parsing file: %s"%fname)
            p = Doxy2SWIG(fname, self.include_function_definition, self.quiet)
            p.generate()
            self.pieces.extend(self.clean_pieces(p.pieces))

    def write(self, fname):
        o = my_open_write(fname)
        if self.multi:
            o.write("".join(self.pieces).encode('ascii', 'ignore').strip())
        else:
            o.write("".join(self.clean_pieces(self.pieces)).encode('ascii', 'ignore').strip())
        o.close()

    def clean_pieces(self, pieces):
        """Cleans the list of strings given as `pieces`.  It replaces
        multiple newlines by a maximum of 2 and returns a new list.
        It also wraps the paragraphs nicely.

        """
        ret = []
        count = 0
        for i in pieces:
            if i == '\n':
                count = count + 1
            else:
                if i == '";':
                    if count:
                        ret.append('\n')
                elif count > 2:
                    ret.append('\n\n')
                elif count:
                    ret.append('\n'*count)
                count = 0
                ret.append(i)

        _data = "".join(ret)
        ret = []
        for i in _data.split('\n\n'):
            if i == 'Parameters:' or i == 'Exceptions:':
                ret.extend([i, '\n-----------', '\n\n'])
            elif i.find('// File:') > -1: # leave comments alone.
                ret.extend([i, '\n'])
            else:
                _tmp = i.strip()
                _tmp = self.lead_spc.sub(r'\1"\2', _tmp)
                ret.extend([_tmp, '\n\n'])
        return ret


def convert(input, output, include_function_definition=True, quiet=False):
    p = Doxy2SWIG(input, include_function_definition, quiet)
    p.generate()
    dir_input = os.path.dirname(input)
    pdir = os.path.join(dir_input, 'processed')
    try:
        os.mkdir(pdir)
    except:
        pass
    base_input = os.path.basename(input)
    # try:
    with open(os.path.join(pdir, '{0}'.format(base_input)), 'wb') as pxml_file:
        pxml_file.write(p.xmldoc.toxml().encode('ascii', 'ignore').strip())
    p.write(output)
    #except Exception as e:
    #    print ('doxy2swig.py: {0}'.format(e))
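
# Editor's sketch of programmatic use (mirrors the CLI described in the module
# docstring; the file names are hypothetical):
# convert('xml/index.xml', 'docstrings.i', include_function_definition=False)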

def main():
    usage = __doc__
    parser = optparse.OptionParser(usage)
    parser.add_option("-n", '--no-function-definition',
                      action='store_true',
                      default=False,
                      dest='func_def',
                      help='do not include doxygen function definitions')
    parser.add_option("-q", '--quiet',
                      action='store_true',
                      default=False,
                      dest='quiet',
                      help='be quiet and minimize output')

    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error("error: no input and output specified")

    convert(args[0], args[1], not options.func_def, options.quiet)


if __name__ == '__main__':
    main()
| apache-2.0 | 5,929,007,981,534,552,000 | 32.427481 | 94 | 0.524606 | false |
TechInvestLab/dot15926 | editor_qt/iso15926/patterns/patterns_actions.py | 1 | 15984 | """
.15925 Editor
Copyright 2014 TechInvestLab.ru [email protected]
.15925 Editor is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
.15925 Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with .15925 Editor.
"""
import iso15926.common.dialogs as dialogs
from framework.dialogs import Notify, Choice
from _ordereddict import ordereddict
def GenPartName(option, default = None):
    if default:
        i = 0
        name_gen = default
    else:
        i = 1
        name_gen = 'entity%i'%i
    while True:
        for p in option['parts']:
            if 'self' in p and p['self'] == name_gen:
                break
        else:
            return name_gen
        i += 1
        name_gen = 'entity%i'%i
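
# Editor's note (illustrative, not in the original file): with
# option = {'parts': [{'self': 'entity1'}]}, GenPartName(option) returns
# 'entity2' -- the first 'entityN' not already used as a part's 'self' name.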

class DocumentPropertyChange():
    props_change = True

    def __init__(self, doc, prop, value):
        self.doc = doc
        self.prop = prop
        self.value = value

    def Redo(self):
        self.old_value = getattr(self.doc, self.prop, None)
        if self.old_value == self.value:
            return False
        self.doc.UpdateProps({self.prop: self.value})
        return True

    def Undo(self):
        self.doc.UpdateProps({self.prop: self.old_value})
        self.doc.RefreshProps()


class DocumentModifyPatterns():
    def __init__(self, doc, pattern, delete = False, new = True):
        self.doc = doc
        self.pattern = pattern
        self.delete = delete
        self.new = new

    def Redo(self):
        if self.delete:
            if self.pattern['name'] not in self.doc.patterns or self.doc.patterns[self.pattern['name']] != self.pattern:
                return False
            del self.doc.patterns[self.pattern['name']]
            wizard.W_PatternDeleted(self.pattern)
        else:
            if not self.new:
                counter = 1
                name = self.pattern['name']
                while name:
                    for p in self.doc.patterns.iterkeys():
                        if name == p:
                            name = '%s%i'%(self.pattern['name'], counter)
                            counter += 1
                            break
                    else:
                        self.pattern['name'] = name
                        break
            if self.pattern['name'] in self.doc.patterns:
                Notify(tm.main.pattern_already_exist)
                return False
            self.doc.patterns[self.pattern['name']] = self.pattern
            wizard.W_PatternAdded(self.doc, self.pattern, new = self.new)
            self.new = False
        return True

    def Undo(self):
        if self.delete:
            self.doc.patterns[self.pattern['name']] = self.pattern
            wizard.W_PatternAdded(self.doc, self.pattern)
        else:
            del self.doc.patterns[self.pattern['name']]
            wizard.W_PatternDeleted(self.pattern)


class DocumentChangePatternName():
    def __init__(self, doc, pattern, new_name):
        self.doc = doc
        self.pattern = pattern
        self.new_name = new_name

    def Redo(self):
        self.old_name = self.pattern['name']
        if not self.new_name:
            Notify(tm.main.empty_pattern_name)
            return False
        if self.old_name == self.new_name:
            return False
        if self.new_name in self.doc.patterns:
            Notify(tm.main.pattern_option_already_exist)
            return False
        self.pattern['name'] = self.new_name
        del self.doc.patterns[self.old_name]
        self.doc.patterns[self.new_name] = self.pattern
        wizard.W_PatternNameChanged(self.pattern)
        return True

    def Undo(self):
        self.pattern['name'] = self.old_name
        del self.doc.patterns[self.new_name]
        self.doc.patterns[self.old_name] = self.pattern
        wizard.W_PatternNameChanged(self.pattern)


class DocumentChangePatternProperty():
    def __init__(self, doc, pattern, prop, value):
        self.doc = doc
        self.pattern = pattern
        self.prop = prop
        self.value = value

    def Redo(self):
        self.old_value = self.pattern.get(self.prop, type(self.value)())
        if self.value == self.old_value:
            return False
        self.pattern[self.prop] = self.value
        wizard.W_PatternPropsChanged(self.pattern)
        return True

    def Undo(self):
        self.pattern[self.prop] = self.old_value
        wizard.W_PatternPropsChanged(self.pattern)


class DocumentChangePatternOptionName():
    def __init__(self, pattern, option, new_name):
        self.pattern = pattern
        self.option = option
        self.new_name = new_name

    def Redo(self):
        self.old_name = self.option.get('name')
        if self.old_name == self.new_name:
            return False
        if self.new_name:
            if self.new_name in (o.get('name') for o in self.pattern['options']):
                Notify(tm.main.pattern_option_already_exist)
                return False
            self.option['name'] = self.new_name
        elif self.old_name:
            del self.option['name']
        wizard.W_PatternOptionChanged(self.option)
        return True

    def Undo(self):
        if self.old_name:
            self.option['name'] = self.old_name
        elif self.new_name:
            del self.option['name']
        wizard.W_PatternOptionChanged(self.option)


class DocumentChangePatternSignature():
    def __init__(self, pattern, role, data, old_role = None):
        self.pattern = pattern
        self.role = role
        self.data = data
        self.old_role = old_role

    def Redo(self):
        if self.old_role:
            self.old_data = self.pattern['signature'][self.old_role]
            del self.pattern['signature'][self.old_role]
        if self.role:
            self.pattern['signature'][self.role] = self.data
        wizard.W_PatternSignatureChanged(self.pattern, self.role, self.old_role)
        return True

    def Undo(self):
        if self.role:
            del self.pattern['signature'][self.role]
        if self.old_role:
            self.pattern['signature'][self.old_role] = self.old_data
        wizard.W_PatternSignatureChanged(self.pattern, self.old_role, self.role)


class DocumentModifyPatternOptions():
    def __init__(self, pattern, option, delete = False, new = True):
        self.pattern = pattern
        self.option = option
        self.delete = delete
        self.new = new

    def Redo(self):
        if self.delete:
            if self.option not in self.pattern['options']:
                return False
            self.idx = self.pattern['options'].index(self.option)
            del self.pattern['options'][self.idx]
            wizard.W_PatternOptionDeleted(self.option)
        else:
            if self.option in self.pattern['options']:
                return False
            if not self.new:
                counter = 1
                name = self.option.get('name')
                while name:
                    for o in self.pattern['options']:
                        if name == o.get('name'):
                            name = '%s%i'%(self.option['name'], counter)
                            counter += 1
                            break
                    else:
                        self.option['name'] = name
                        break
            self.pattern['options'].append(self.option)
            wizard.W_PatternOptionAdded(self.pattern, self.option, new = self.new)
            self.new = False
        return True

    def Undo(self):
        if self.delete:
            self.pattern['options'].insert(self.idx, self.option)
            wizard.W_PatternOptionAdded(self.pattern, self.option)
        else:
            self.pattern['options'].remove(self.option)
            wizard.W_PatternOptionDeleted(self.option)


class DocumentModifyPatternOptionParts():
    def __init__(self, option, part, delete = False):
        self.option = option
        self.part = part
        self.delete = delete

    def Redo(self):
        if self.delete:
            if self.part not in self.option['parts']:
                return False
            self.idx = self.option['parts'].index(self.part)
            del self.option['parts'][self.idx]
            wizard.W_PatternOptionPartDeleted(self.option, self.part)
        else:
            if self.part in self.option['parts']:
                return False
            if 'type' not in self.part or not self.part['type'].startswith('patterns.'):
                name = self.part.get('self', None)
                self.part['self'] = GenPartName(self.option, name)
            self.option['parts'].append(self.part)
            wizard.W_PatternOptionPartAdded(self.option, self.part)
        return True

    def Undo(self):
        if self.delete:
            self.option['parts'].insert(self.idx, self.part)
            wizard.W_PatternOptionPartAdded(self.option, self.part)
        else:
            self.option['parts'].remove(self.part)
            wizard.W_PatternOptionPartDeleted(self.option, self.part)


class DocumentChangePatternOptionPartIndex():
    def __init__(self, option, part, new_idx):
        self.option = option
        self.part = part
        self.new_idx = new_idx

    def Redo(self):
        if self.new_idx >= len(self.option['parts']):
            return False
        self.old_idx = self.option['parts'].index(self.part)
        if self.old_idx == self.new_idx:
            return False
        del self.option['parts'][self.old_idx]
        self.option['parts'].insert(self.new_idx, self.part)
        wizard.W_PatternOptionPartIndexChanged(self.option, self.part)
        return True

    def Undo(self):
        del self.option['parts'][self.new_idx]
        self.option['parts'].insert(self.old_idx, self.part)
        wizard.W_PatternOptionPartIndexChanged(self.option, self.part)


class DocumentModifyPatternOptionPartName():
    def __init__(self, part, name):
        self.part = part
        self.name = name

    def Redo(self):
        self.old_name = self.part.get('self')
        if self.old_name == self.name:
            return False
        if self.name:
            self.part['self'] = self.name
        else:
            del self.part['self']
        wizard.W_PatternOptionPartNameChanged(self.part)

    def Undo(self):
        if self.old_name:
            self.part['self'] = self.old_name
        else:
            del self.part['self']
        wizard.W_PatternOptionPartNameChanged(self.part)


ROLE_ADD = 0
ROLE_DELETE = 1
ROLE_BIND = 2
ROLE_RENAME = 3
ROLE_MODIFY = 4

class DocumentModifyPatternOptionPartRoles():
    def __init__(self, option, part, role, value = None, action = None):
        self.option = option
        self.part = part
        self.role = role
        self.value = value
        self.action = action
        self.other = None

    def Redo(self):
        if self.action == ROLE_DELETE:
            if self.role not in self.part:
                return False
            self.value = self.part[self.role]
            del self.part[self.role]
            wizard.W_PatternOptionPartRoleDeleted(self.part, self.role)
        elif self.action == ROLE_ADD:
            if self.role in self.part:
                Notify(tm.main.pattern_option_part_role_already_exist)
                return False
            self.part[self.role] = self.value
            wizard.W_PatternOptionPartRoleAdded(self.part, self.role)
        elif self.action == ROLE_BIND:
            if self.value == self.part[self.role]:
                return False
            self.old_value = self.part[self.role]
            self.part[self.role] = self.value
            wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.role)
        elif self.action == ROLE_RENAME:
            if self.value == self.role:
                return False
            if self.value in self.part:
                Notify(tm.main.pattern_option_part_role_already_exist)
                return False
            self.part[self.value] = self.part[self.role]
            del self.part[self.role]
            wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.value)
        elif self.action == ROLE_MODIFY:
            if self.role not in self.part:
                return False
            self.old_value = self.part[self.role]
            if self.value[0] == self.role and self.value[1] == self.old_value:
                return False
            if self.value[0] != self.role:
                if self.value[0] in self.part:
                    Notify(tm.main.pattern_option_part_role_already_exist)
                    return False
            del self.part[self.role]
            self.part[self.value[0]] = self.value[1]
            wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.value[0])

        if 'self' in self.part and 'type' in self.part and isinstance(self.part['type'], basestring) and self.part['type'].startswith('patterns.'):
            self.other = DocumentModifyPatternOptionPartRoles(self.option, self.part, 'self', action = ROLE_DELETE)
        elif 'self' not in self.part and ('type' not in self.part or not isinstance(self.part['type'], basestring) or not self.part['type'].startswith('patterns.')):
            self.other = DocumentModifyPatternOptionPartRoles(self.option, self.part, 'self', value = GenPartName(self.option), action = ROLE_ADD)

        if self.other and not self.other.Redo():
            self.other = None

        return True

    def Undo(self):
        if self.other:
            self.other.Undo()
        if self.action == ROLE_DELETE:
            self.part[self.role] = self.value
            wizard.W_PatternOptionPartRoleAdded(self.part, self.role)
        elif self.action == ROLE_ADD:
            del self.part[self.role]
            wizard.W_PatternOptionPartRoleDeleted(self.part, self.role)
        elif self.action == ROLE_BIND:
            self.part[self.role] = self.old_value
            wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.role)
        elif self.action == ROLE_RENAME:
            self.part[self.role] = self.part[self.value]
            del self.part[self.value]
            wizard.W_PatternOptionPartRoleChanged(self.part, self.value, self.role)
        elif self.action == ROLE_MODIFY:
            del self.part[self.value[0]]
            self.part[self.role] = self.old_value
            wizard.W_PatternOptionPartRoleChanged(self.part, self.value[0], self.role)


class DocumentChangePatternOptionPartRole():
    def __init__(self, option, part, old_role, role, value):
        self.option = option
        self.part = part
        self.old_role = old_role
        self.role = role
        self.value = value
        self.name = None

    def Redo(self):
        self.old_value = self.part[self.old_role]
        if self.old_value == self.value and self.old_role == self.role:
            return False
        del self.part[self.old_role]
        self.part[self.role] = self.value
        if self.role == 'type' and self.value.startswith('patterns.') and 'self' in self.part:
            self.name = self.part['self']
            del self.part['self']
        wizard.W_PatternOptionPartRoleChanged(self.part, self.old_role, self.role)
        return True

    def Undo(self):
        del self.part[self.role]
        if self.name:
            self.part['self'] = GenPartName(self.option, self.name)
        self.part[self.old_role] = self.old_value
        wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.old_role)
| lgpl-3.0 | 7,084,697,861,847,309,000 | 33.226981 | 165 | 0.581519 | false |
vprime/puuuu | env/lib/python2.7/site-packages/django/utils/translation/trans_real.py | 35 | 25577 | """Translation helper functions."""
from __future__ import unicode_literals
import locale
import os
import re
import sys
import gettext as gettext_module
from threading import local
import warnings
from django.utils.importlib import import_module
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import memoize
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeData
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
_checked_languages = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
    (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
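# Illustrative sketch (not part of the original module) of the two
# conversions defined above: to_locale('en-us') -> 'en_US', and suffixes
# longer than two characters are title-cased ('sr-latn' -> 'sr_Latn').
def _demo_locale_roundtrip():
    return to_locale('sr-latn'), to_language('sr_Latn')
    # -> ('sr_Latn', 'sr-latn')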
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
    Fetches the translation object for a given language and installs it as
    the current translation object for the current thread.
"""
_active.value = translation(language)
def deactivate():
"""
    Uninstalls the currently active translation object so that further _ calls
    will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
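# Illustrative sketch (not part of the original module; assumes a
# configured settings module and an available 'de' catalog): activate()
# installs a thread-local translation that get_language() then reports.
def _demo_activate_language():
    activate('de')
    try:
        return get_language()  # -> 'de'
    finally:
        deactivate()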
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
    Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
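# Illustrative sketch (not part of the original module): the msgid looked
# up for pgettext('month name', 'May') is 'month name\x04May'; when the
# active catalog has no such entry, the bare message comes back unchanged.
def _demo_pgettext_fallback():
    return pgettext('month name', 'May')  # -> u'May' if untranslated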
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
    Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
        Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies
or session and during format localization.
"""
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
check_for_language = memoize(check_for_language, _checked_languages, 1)
def get_supported_language_variant(lang_code, supported=None, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
    If `strict` is False (the default), the function will look for an
    alternative country-specific variant when the language code currently
    being checked is not found.
"""
if supported is None:
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
if lang_code:
# if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
generic_lang_code = lang_code.split('-')[0]
variants = (lang_code, lang_code.lower(), generic_lang_code,
generic_lang_code.lower())
for code in variants:
if code in supported and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported:
if supported_code.startswith((generic_lang_code + '-',
generic_lang_code.lower() + '-')):
return supported_code
raise LookupError(lang_code)
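# Illustrative sketch (not part of the original module; assumes a 'fr'
# catalog is available, as in a standard Django install): 'fr-CA' falls
# back to the generic 'fr' when no country-specific variant is supported.
def _demo_language_variant():
    supported = SortedDict([('fr', 'French'), ('en', 'English')])
    return get_supported_language_variant('fr-CA', supported)  # -> 'fr'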
def get_language_from_path(path, supported=None, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
    If `strict` is False (the default), the function will look for an
    alternative country-specific variant when the language code currently
    being checked is not found.
"""
if supported is None:
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, supported, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
global _accepted
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info, supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code, supported)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
try:
accept_lang = get_supported_language_variant(accept_lang, supported)
except LookupError:
continue
else:
_accepted[normalized] = accept_lang
return accept_lang
try:
return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.conf import settings
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO()
message_context = None
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
else:
out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return force_str(out.getvalue())
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
if priority:
priority = float(priority)
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
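# Illustrative sketch (not part of the original module): entries come back
# sorted by q-value, a missing q counts as 1.0, and a malformed header
# yields an empty list.
def _demo_parse_accept():
    return parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
    # -> [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]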
| mit | 2,143,377,446,980,991,000 | 36.835799 | 143 | 0.57407 | false |
towerjoo/mindsbook | django/contrib/localflavor/es/forms.py | 309 | 7537 | # -*- coding: utf-8 -*-
"""
Spanish-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
import re
class ESPostalCodeField(RegexField):
"""
    A form field that validates its input as a Spanish postal code.
    A Spanish postal code is a five-digit string whose first two digits,
    between 01 and 52, identify the province.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the range and format 01XXX - 52XXX.'),
}
def __init__(self, *args, **kwargs):
super(ESPostalCodeField, self).__init__(
r'^(0[1-9]|[1-4][0-9]|5[0-2])\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
class ESPhoneNumberField(RegexField):
"""
A form field that validates its input as a Spanish phone number.
    Information numbers are omitted.
    Spanish phone numbers are nine-digit numbers whose first digit is 6 (for
    cell phones), 8 (for special phones), or 9 (for landlines and special
    phones).
TODO: accept and strip characters like dot, hyphen... in phone number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(ESPhoneNumberField, self).__init__(r'^(6|8|9)\d{8}$',
max_length=None, min_length=None, *args, **kwargs)
class ESIdentityCardNumberField(RegexField):
"""
Spanish NIF/NIE/CIF (Fiscal Identification Number) code.
    Validates three different formats:
NIF (individuals): 12345678A
CIF (companies): A12345678
NIE (foreigners): X12345678A
according to a couple of simple checksum algorithms.
Value can include a space or hyphen separator between number and letters.
Number length is not checked for NIF (or NIE), old values start with a 1,
and future values can contain digits greater than 8. The CIF control digit
can be a number or a letter depending on company type. Algorithm is not
    public, and different authors have different opinions on which ones allow
letters, so both validations are assumed true for all types.
"""
default_error_messages = {
'invalid': _('Please enter a valid NIF, NIE, or CIF.'),
'invalid_only_nif': _('Please enter a valid NIF or NIE.'),
'invalid_nif': _('Invalid checksum for NIF.'),
'invalid_nie': _('Invalid checksum for NIE.'),
'invalid_cif': _('Invalid checksum for CIF.'),
}
def __init__(self, only_nif=False, *args, **kwargs):
self.only_nif = only_nif
self.nif_control = 'TRWAGMYFPDXBNJZSQVHLCKE'
self.cif_control = 'JABCDEFGHI'
self.cif_types = 'ABCDEFGHKLMNPQS'
self.nie_types = 'XT'
id_card_re = re.compile(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), re.IGNORECASE)
super(ESIdentityCardNumberField, self).__init__(id_card_re, max_length=None, min_length=None,
error_message=self.default_error_messages['invalid%s' % (self.only_nif and '_only_nif' or '')],
*args, **kwargs)
def clean(self, value):
super(ESIdentityCardNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
nif_get_checksum = lambda d: self.nif_control[int(d)%23]
value = value.upper().replace(' ', '').replace('-', '')
m = re.match(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), value)
letter1, number, letter2 = m.groups()
if not letter1 and letter2:
# NIF
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nif'])
elif letter1 in self.nie_types and letter2:
# NIE
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nie'])
elif not self.only_nif and letter1 in self.cif_types and len(number) in [7, 8]:
# CIF
if not letter2:
number, letter2 = number[:-1], int(number[-1])
checksum = cif_get_checksum(number)
if letter2 in (checksum, self.cif_control[checksum]):
return value
else:
raise ValidationError(self.error_messages['invalid_cif'])
else:
raise ValidationError(self.error_messages['invalid'])
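# Worked example (not part of the original module): the NIF check letter
# is nif_control[number % 23]. For the classic sample number 12345678,
# 12345678 % 23 == 14, so the letter is 'Z' and '12345678Z' validates.
def _demo_nif_letter(number=12345678):
    return 'TRWAGMYFPDXBNJZSQVHLCKE'[number % 23]  # -> 'Z'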
class ESCCCField(RegexField):
"""
A form field that validates its input as a Spanish bank account or CCC
(Codigo Cuenta Cliente).
Spanish CCC is in format EEEE-OOOO-CC-AAAAAAAAAA where:
E = entity
O = office
C = checksum
A = account
It's also valid to use a space as delimiter, or to use no delimiter.
    The first checksum digit validates the entity and office, and the last
    one validates the account. Validation is done by multiplying every digit
    of the 10-digit value (left-padded with zeros if necessary) by the weight
    in the same position of the sequence 1, 2, 4, 8, 5, 10, 9, 7, 3, 6,
    summing the products, and subtracting the sum modulo 11 from 11. The
    result is the checksum digit, except that 10 becomes 1 and 11 becomes 0.
TODO: allow IBAN validation too
"""
default_error_messages = {
'invalid': _('Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'),
'checksum': _('Invalid checksum for bank account number.'),
}
def __init__(self, *args, **kwargs):
super(ESCCCField, self).__init__(r'^\d{4}[ -]?\d{4}[ -]?\d{2}[ -]?\d{10}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
super(ESCCCField, self).clean(value)
if value in EMPTY_VALUES:
return u''
control_str = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
m = re.match(r'^(\d{4})[ -]?(\d{4})[ -]?(\d{2})[ -]?(\d{10})$', value)
entity, office, checksum, account = m.groups()
get_checksum = lambda d: str(11 - sum([int(digit) * int(control) for digit, control in zip(d, control_str)]) % 11).replace('10', '1').replace('11', '0')
if get_checksum('00' + entity + office) + get_checksum(account) == checksum:
return value
else:
raise ValidationError(self.error_messages['checksum'])
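# Illustrative sketch (not part of the original module): builds a CCC
# whose two check digits satisfy the scheme described in the ESCCCField
# docstring. The entity/office/account values below are hypothetical.
def _demo_build_ccc(entity='2077', office='0024', account='0031700545'):
    weights = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
    def check_digit(digits):
        r = 11 - sum(int(d) * w for d, w in zip(digits, weights)) % 11
        return {10: 1, 11: 0}.get(r, r)
    d1 = check_digit('00' + entity + office)
    d2 = check_digit(account)
    # -> '2077-0024-00-0031700545', which ESCCCField().clean() accepts
    return '%s-%s-%d%d-%s' % (entity, office, d1, d2, account)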
class ESRegionSelect(Select):
"""
    A Select widget that uses a list of Spanish regions as its choices.
"""
def __init__(self, attrs=None):
from es_regions import REGION_CHOICES
super(ESRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ESProvinceSelect(Select):
"""
    A Select widget that uses a list of Spanish provinces as its choices.
"""
def __init__(self, attrs=None):
from es_provinces import PROVINCE_CHOICES
super(ESProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
def cif_get_checksum(number):
s1 = sum([int(digit) for pos, digit in enumerate(number) if int(pos) % 2])
s2 = sum([sum([int(unit) for unit in str(int(digit) * 2)]) for pos, digit in enumerate(number) if not int(pos) % 2])
return (10 - ((s1 + s2) % 10)) % 10
| bsd-3-clause | -8,684,023,893,457,458,000 | 39.740541 | 160 | 0.611782 | false |
GarciaPL/TrafficCity | Streets4MPI/utils.py | 2 | 1073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# utils.py
# Copyright 2012 Joachim Nitschke
#
# This file is part of Streets4MPI.
#
# Streets4MPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Streets4MPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Streets4MPI. If not, see <http://www.gnu.org/licenses/>.
#
from array import array
from itertools import repeat
def merge_arrays(arrays):
merged_array = array("I", repeat(0, len(arrays[0])))
for arr in arrays:
        if arr is not None:
for index in range(0, len(arr)):
merged_array[index] += arr[index]
return merged_array
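# Illustrative usage sketch (not part of the original file): element-wise
# sum of two unsigned-int arrays; None entries in the input are skipped.
def _demo_merge_arrays():
    a = array("I", [1, 2, 3])
    b = array("I", [4, 5, 6])
    return merge_arrays([a, None, b])  # -> array('I', [5, 7, 9])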
| gpl-2.0 | -8,991,258,293,936,046,000 | 29.657143 | 70 | 0.706431 | false |
google-research/privateFM | privateFM/FM_simulate.py | 1 | 4753 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulation (not actual implementation) for private FM sketch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import sqrt, log, exp, ceil
import numpy as np
import scipy.integrate as integrate
import scipy.special
from privateFM.utils import generate_max_geom, EasyDict
# ------------------------------------------------------------------------------
# FM sketch
# ------------------------------------------------------------------------------
def FM(k, gamma, eta, m, seed):
"""Non private FM.
Returns:
m rv ~ max{eta, max{Z_1,..., Z_k}} where Z_i~Geom(gamma/(1+gamma)).
"""
if k == 0:
print('FM gets k=0')
return -1
return generate_max_geom(k, gamma, eta, m, seed)
def set_k_p_eta(config):
"""A helper function for computing k_p and eta."""
epsilon, delta, m, gamma = config.epsilon, config.delta, config.m, config.gamma
if not 0 < epsilon < float('inf') or not 0 < delta < 1:
k_p = 0
eta = 0
else:
eps1 = epsilon / 4 / sqrt(m * log(1 / delta))
k_p = ceil(1 / (exp(eps1) - 1))
eta = ceil(-log(1 - exp(-eps1)) / log(1 + gamma))
if config.morePhantom:
k_p = max((1 + gamma)**eta, k_p)
return k_p, eta
def FMPrivate(k, config, seed, estimation_option='quantile'):
"""Private FM.
Args:
k: true # distinct
config: contains epsilon, delta, m, gamma
seed: random seed
estimation_option: quantile, mean_harmo, mean_geom
Returns:
estimation, i_max
"""
if config.epsilon > 0 and 0 < config.delta < 1:
assert config.epsilon <= 2 * log(1 / config.delta)
k_p, eta = set_k_p_eta(config)
I = FM(k + k_p, config.gamma, eta, config.m, seed)
param = EasyDict(config=config, k_p=k_p, factor=0)
return make_estimate(I, estimation_option, param), I
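# Illustrative sketch (not part of the original file): a private estimate
# for k = 10000 true distinct elements. The config fields mirror the ones
# read by set_k_p_eta/FMPrivate above; the values are hypothetical.
def _demo_fm_private():
    config = EasyDict(epsilon=1.0, delta=1e-6, m=1024, gamma=1.0,
                      morePhantom=False)
    estimate, _ = FMPrivate(10000, config, seed=0,
                            estimation_option='quantile')
    return estimate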
# ------------------------------------------------------------------------------
# Estimation
# ------------------------------------------------------------------------------
def make_estimate(I, option, param):
"""Make the final cardinality estimation given I.
Args:
option: quantile, mean_harmo, mean_geom
param: a dictionary containing k_p and config and factor (if use quantile)
Returns:
estimation
"""
assert option in ['quantile', 'mean_harmo', 'mean_geom']
gamma = param.config.gamma
k_p = param.k_p
m = param.config.m
I = np.array(I)
if option == 'quantile':
factor = param.factor
return (1 + gamma)**np.quantile(I, exp(-1) - gamma * factor) - k_p
debias = get_debias(m, option, gamma)
if option == 'mean_geom': # Durand & Frajolet http://algo.inria.fr/flajolet/Publications/DuFl03.pdf
return (1 + gamma)**np.mean(I) * debias - k_p
if option == 'mean_harmo': # HLL https://en.wikipedia.org/wiki/HyperLogLog
return m / np.sum(np.power(1 + gamma, -I)) * debias - k_p
    raise ValueError('make_estimate got an unknown option.')
def get_debias(m, option, gamma):
if option == 'mean_geom':
return (scipy.special.gamma(-1 / m) *
((1 + gamma)**(-1 / m) - 1) / log(1 + gamma))**(-m) / (1 + gamma)
if option == 'mean_harmo':
if gamma == 1.0:
if m <= 16:
debias = 0.673
elif m <= 32:
debias = 0.697
elif m <= 64:
debias = 0.709
            else:
                # Standard HLL constant; also covers the 64 < m < 128 gap
                # left unhandled by the branches above.
debias = 0.7213 / (1 + 1.079 / m)
return debias
else:
debias = 1 / integrate.quad(
lambda u: (log((u + 1 + gamma) /
(u + 1)) / log(1 + gamma))**m * m, 0, float('inf'))[0]
if debias > 2:
m = 10000
debias = 1 / integrate.quad(
lambda u:
(log((u + 1 + gamma) /
(u + 1)) / log(1 + gamma))**m * m, 0, float('inf'))[0]
# print('gamma is larger than 2, changed')
return debias
| apache-2.0 | 3,542,566,447,659,207,000 | 32.95 | 106 | 0.524721 | false |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/click/_compat.py | 19 | 23399 | import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
CYGWIN = sys.platform.startswith('cygwin')
# Determine local App Engine environment, per Google's own suggestion
APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
'Development/' in os.environ['SERVER_SOFTWARE'])
WIN = sys.platform.startswith('win') and not APP_ENGINE
DEFAULT_COLUMNS = 80
_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
return sys.getfilesystemencoding() or sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors,
force_readable=False, force_writable=False):
if encoding is None:
encoding = get_best_encoding(stream)
if errors is None:
errors = 'replace'
return _NonClosingTextIOWrapper(stream, encoding, errors,
line_buffering=True,
force_readable=force_readable,
force_writable=force_writable)
def is_ascii_encoding(encoding):
"""Checks if a given encoding is ascii."""
try:
return codecs.lookup(encoding).name == 'ascii'
except LookupError:
return False
def get_best_encoding(stream):
"""Returns the default stream encoding if not found."""
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __init__(self, stream, encoding, errors,
force_readable=False, force_writable=False, **extra):
self._stream = stream = _FixupStream(stream, force_readable,
force_writable)
io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
# The io module is a place where the Python 3 text behavior
# was forced upon Python 2, so we need to unbreak
# it to look like Python 2.
if PY2:
def write(self, x):
if isinstance(x, str) or is_bytes(x):
try:
self.flush()
except Exception:
pass
return self.buffer.write(str(x))
return io.TextIOWrapper.write(self, x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __del__(self):
try:
self.detach()
except Exception:
pass
def isatty(self):
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
class _FixupStream(object):
"""The new io interface needs more from streams than streams
traditionally implement. As such, this fix-up code is necessary in
some circumstances.
    The forcing of the readable and writable flags is there because some
    tools put badly patched objects on sys (one such offender is certain
    versions of the Jupyter notebook).
"""
def __init__(self, stream, force_readable=False, force_writable=False):
self._stream = stream
self._force_readable = force_readable
self._force_writable = force_writable
def __getattr__(self, name):
return getattr(self._stream, name)
def read1(self, size):
f = getattr(self._stream, 'read1', None)
if f is not None:
return f(size)
# We only dispatch to readline instead of read in Python 2 as we
        # do not want to cause problems with the different implementation
# of line buffering.
if PY2:
return self._stream.readline(size)
return self._stream.read(size)
def readable(self):
if self._force_readable:
return True
x = getattr(self._stream, 'readable', None)
if x is not None:
return x()
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
if self._force_writable:
return True
x = getattr(self._stream, 'writable', None)
if x is not None:
return x()
try:
self._stream.write('')
except Exception:
try:
self._stream.write(b'')
except Exception:
return False
return True
def seekable(self):
x = getattr(self._stream, 'seekable', None)
if x is not None:
return x()
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
if PY2:
text_type = unicode
bytes = str
raw_input = raw_input
string_types = (str, unicode)
int_types = (int, long)
iteritems = lambda x: x.iteritems()
range_type = xrange
def is_bytes(x):
return isinstance(x, (buffer, bytearray))
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# For Windows, we need to force stdout/stdin/stderr to binary if it's
# fetched for that. This obviously is not the most correct way to do
# it as it changes global state. Unfortunately, there does not seem to
# be a clear better way to do it as just reopening the file in binary
# mode does not change anything.
#
# An option would be to do what Python 3 does and to open the file as
# binary only, patch it back to the system, and then use a wrapper
# stream that converts newlines. It's not quite clear what's the
# correct option here.
#
# This code also lives in _winconsole for the fallback to the console
# emulation stream.
#
# There are also Windows environments where the `msvcrt` module is not
# available (which is why we use try-catch instead of the WIN variable
# here), such as the Google App Engine development server on Windows. In
# those cases there is just nothing we can do.
def set_binary_mode(f):
return f
try:
import msvcrt
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
msvcrt.setmode(fileno, os.O_BINARY)
return f
try:
import fcntl
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
return f
def isidentifier(x):
return _identifier_re.search(x) is not None
def get_binary_stdin():
return set_binary_mode(sys.stdin)
def get_binary_stdout():
_wrap_std_stream('stdout')
return set_binary_mode(sys.stdout)
def get_binary_stderr():
_wrap_std_stream('stderr')
return set_binary_mode(sys.stderr)
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
_wrap_std_stream('stdout')
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
_wrap_std_stream('stderr')
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
return value
else:
import io
text_type = str
raw_input = input
string_types = (str,)
int_types = (int,)
range_type = range
isidentifier = lambda x: x.isidentifier()
iteritems = lambda x: iter(x.items())
def is_bytes(x):
return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
        except Exception:
            # This happens in some cases where the stream was already
            # closed. In this case, we assume the default.
            return default
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_reader(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if its encoding is ASCII."""
# If the stream does not have an encoding set, we assume it's set
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
# but this at least will force Click to recover somehow.
return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise, it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors,
force_readable=False):
if _is_binary_reader(text_reader, False):
binary_reader = text_reader
else:
# If there is no target encoding set, we need to verify that the
# reader is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_reader):
return text_reader
if _is_compatible_text_stream(text_reader, encoding, errors):
return text_reader
# If the reader has no encoding, we try to find the underlying
# binary reader for it. If that fails because the environment is
# misconfigured, we silently go with the same reader because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_reader = _find_binary_reader(text_reader)
if binary_reader is None:
return text_reader
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_reader, encoding, errors,
force_readable=force_readable)
def _force_correct_text_writer(text_writer, encoding, errors,
force_writable=False):
if _is_binary_writer(text_writer, False):
binary_writer = text_writer
else:
# If there is no target encoding set, we need to verify that the
# writer is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_writer):
return text_writer
if _is_compatible_text_stream(text_writer, encoding, errors):
return text_writer
# If the writer has no encoding, we try to find the underlying
# binary writer for it. If that fails because the environment is
# misconfigured, we silently go with the same writer because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_writer = _find_binary_writer(text_writer)
if binary_writer is None:
return text_writer
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_writer, encoding, errors,
force_writable=force_writable)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stderr.')
return writer
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
else:
value = value.encode('utf-8', 'surrogateescape') \
.decode('utf-8', 'replace')
return value
def get_streerror(e, default=None):
if hasattr(e, 'strerror'):
msg = e.strerror
else:
if default is not None:
msg = default
else:
msg = str(e)
if isinstance(msg, bytes):
msg = msg.decode('utf-8', 'replace')
return msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
atomic=False):
# Standard streams first. These are simple because they don't need
# special handling for the atomic flag. It's entirely ignored.
if filename == '-':
if any(m in mode for m in ['w', 'a', 'x']):
if 'b' in mode:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if 'b' in mode:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
if encoding is None:
return open(filename, mode), True
return io.open(filename, mode, encoding=encoding, errors=errors), True
# Some usability stuff for atomic writes
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('Atomic writes only make sense with `w`-mode.')
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import tempfile
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
prefix='.__atomic-write')
if encoding is not None:
f = io.open(fd, mode, encoding=encoding, errors=errors)
else:
f = os.fdopen(fd, mode)
return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, 'replace'):
_replace = os.replace
_can_replace = True
else:
_replace = os.rename
_can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
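# Illustrative usage sketch (not part of click's public API): open_stream
# with atomic=True writes to a temp file in the same directory and renames
# it over the target on close. The filename is hypothetical.
def _demo_atomic_write(filename='example.txt'):
    f, should_close = open_stream(filename, 'w', atomic=True)
    try:
        f.write('hello\n')
    finally:
        if should_close:
            f.close()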
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
return _ansi_re.sub('', value)
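# Illustrative sketch (not part of click's public API): strip_ansi drops
# ANSI escape sequences, so a red "hi" comes back as plain "hi".
def _demo_strip_ansi():
    return strip_ansi('\033[31mhi\033[0m')  # -> 'hi'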
def should_strip_ansi(stream=None, color=None):
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream)
return not color
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
# Windows has a smaller terminal
DEFAULT_COLUMNS = 79
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
if PY2:
def raw_input(prompt=''):
sys.stderr.flush()
if prompt:
stdout = _default_text_stdout()
stdout.write(prompt)
stdin = _default_text_stdin()
return stdin.readline().rstrip('\r\n')
try:
import colorama
except ImportError:
pass
else:
_ansi_stream_wrappers = WeakKeyDictionary()
def auto_wrap_for_ansi(stream, color=None):
"""This function wraps a stream so that calls through colorama
are issued to the win32 console API to recolor on demand. It
also ensures to reset the colors if a write call is interrupted
to not destroy the console afterwards.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = ansi_wrapper.stream
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
def get_winterm_size():
win = colorama.win32.GetConsoleScreenBufferInfo(
colorama.win32.STDOUT).srWindow
return win.Right - win.Left, win.Bottom - win.Top
else:
def _get_argv_encoding():
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
_get_windows_console_stream = lambda *x: None
_wrap_std_stream = lambda *x: None
def term_len(x):
return len(strip_ansi(x))
def isatty(stream):
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
stream = src_func() # In case wrapper_func() modified the stream
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(
lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
lambda: sys.stderr, get_text_stderr)
binary_streams = {
'stdin': get_binary_stdin,
'stdout': get_binary_stdout,
'stderr': get_binary_stderr,
}
text_streams = {
'stdin': get_text_stdin,
'stdout': get_text_stdout,
'stderr': get_text_stderr,
}
| apache-2.0 | -1,365,738,049,940,437,000 | 32.284495 | 80 | 0.587632 | false |
CCI-MOC/nova | nova/config.py | 14 | 2488 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import options
from oslo_log import log
from nova import debugger
from nova import paths
from nova import rpc
from nova import version
CONF = cfg.CONF
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('nova.sqlite')
# NOTE(mikal): suds is used by the vmware driver, removing this will
# cause many extraneous log lines for their tempest runs. Refer to
# https://review.openstack.org/#/c/219225/ for details.
_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo_messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
'keystonemiddleware=WARN', 'routes.middleware=WARN',
'stevedore=WARN', 'glanceclient=WARN']
_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
'%(levelname)s %(name)s [%(request_id)s '
'%(user_identity)s] %(instance)s'
'%(message)s')
def parse_args(argv, default_config_files=None):
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
log.register_options(CONF)
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
debugger.register_cli_opts()
CONF(argv[1:],
project='nova',
version=version.version_string(),
default_config_files=default_config_files)
rpc.init(CONF)
| apache-2.0 | 7,662,810,754,682,524,000 | 40.466667 | 78 | 0.649116 | false |
fx19880617/helix | helix-core/src/main/scripts/integration-test/script/pexpect.py | 11 | 76727 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to Pexpect -- the function, run() and the class,
spawn. You can call the run() function to execute a command and return the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The more powerful interface is the spawn class. You can use this to spawn an
external child command and then interact with the child by sending lines and
expecting responses.
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect ('Password:')
child.sendline (mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Geoffrey Marshall, Francisco Lourenco, Glen Mabey, Karthik Gurusamy, Fernando
Perez, Corey Minyard, Jon Cohen, Guillaume Chazarain, Andrew Ryan, Nick
Craig-Wood, Andrew Stone, Jorgen Grahn (Let me know if I forgot anyone.)
Free, open source, and all that good stuff.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Pexpect Copyright (c) 2008 Noah Spurrier
http://pexpect.sourceforge.net/
$Id: pexpect.py 507 2007-12-27 02:40:52Z noah $
"""
try:
import os, sys, time
import select
import string
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError, e:
raise ImportError (str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.3'
__revision__ = '$Revision: 399 $'
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
'split_command_line', '__version__', '__revision__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a scan buffer fills before matching an expected pattern."""
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
    Note that lines are terminated by a CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudo ttys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect ('(?i)password')
child.sendline (mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run ('scp foo [email protected]:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run ("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run ("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run ("ssh [email protected] 'ls -l'", events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
Whenever one of the patterns is seen in the command out run() will send the
associated response string. Note that you should put newlines in your
string if Enter is necessary. The responses may also contain callback
functions. Any callback is function that takes a dictionary as an argument.
The dictionary contains all the locals from the run() function, so you can
access the child spawn object or any other variable defined in run()
(event_count, child, and extra_args are the most useful). A callback may
return True to stop the current run process otherwise run() continues until
the next event. A callback may also return a string which will be sent to
    the child. 'extra_args' is not used directly by run(). It provides a way to
    pass data to a callback function through run() via the locals
    dictionary passed to a callback. """
if timeout == -1:
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
else:
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env)
if events is not None:
patterns = events.keys()
responses = events.values()
else:
patterns=None # We assume that EOF or TIMEOUT will save us.
responses=None
child_result_list = []
event_count = 0
while 1:
try:
index = child.expect (patterns)
if type(child.after) in types.StringTypes:
child_result_list.append(child.before + child.after)
else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
child_result_list.append(child.before)
if type(responses[index]) in types.StringTypes:
child.send(responses[index])
elif type(responses[index]) is types.FunctionType:
callback_result = responses[index](locals())
sys.stdout.flush()
if type(callback_result) in types.StringTypes:
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError ('The callback must be a string or function type.')
event_count = event_count + 1
except TIMEOUT, e:
child_result_list.append(child.before)
break
except EOF, e:
child_result_list.append(child.before)
break
child_result = ''.join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
class spawn (object):
"""This is the main class interface for Pexpect. Use this class to start
and control child applications. """
def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
"""This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn ('/usr/bin/ftp')
child = pexpect.spawn ('/usr/bin/ssh [email protected]')
child = pexpect.spawn ('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn ('/usr/bin/ftp', [])
child = pexpect.spawn ('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn ('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
If you want to run a command and pipe it through another command then
you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > log_list.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
        The searchwindowsize attribute sets how far back in the incoming
        search buffer Pexpect will search for pattern matches. Every time
        Pexpect reads some data from the child it will append the data to the
        incoming buffer. The default is to search from the beginning of the
        incoming buffer each time new data is read from the child. But this is
        very inefficient if you are running a command that generates a large
        amount of data in which you want to match. The searchwindowsize does
        not affect the size of the incoming data buffer. You will still have
        access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. """
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
self.status = None # status returned by os.waitpid
self.flag_eof = False
self.pid = None
self.child_fd = -1 # initially closed
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
self.logfile_read = None # input from child (read_nonblocking)
self.logfile_send = None # output to send (send, sendline)
self.maxread = maxread # max bytes to read at one time into buffer
self.buffer = '' # This is the read buffer. See maxread.
self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
# Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
self.softspace = False # File-like object.
self.name = '<' + repr(self) + '>' # File-like object.
self.encoding = None # File-like object.
self.closed = True # File-like object.
self.cwd = cwd
self.env = env
self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
# Solaris uses internal __fork_pty(). All others use pty.fork().
if (sys.platform.lower().find('solaris')>=0) or (sys.platform.lower().find('sunos5')>=0):
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# allow dummy instances for subclasses that may not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn (command, args)
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. """
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
# -- Fernando Perez
try:
self.close()
except AttributeError:
pass
def __str__(self):
"""This returns a human-readable string that represents the state of
the object. """
s = []
s.append(repr(self))
s.append('version: ' + __version__ + ' (' + __revision__ + ')')
s.append('command: ' + str(self.command))
s.append('args: ' + str(self.args))
s.append('searcher: ' + str(self.searcher))
s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
s.append('before (last 100 chars): ' + str(self.before)[-100:])
s.append('after: ' + str(self.after))
s.append('match: ' + str(self.match))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self,command,args=[]):
"""This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. """
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
        # That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if type(command) == type(0):
raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
if type (args) != type([]):
raise TypeError ('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
self.args = args[:] # work with a copy
self.args.insert (0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join (self.args) + '>'
assert self.pid is None, 'The pid member should be None.'
assert self.command is not None, 'The command member should not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError, e:
raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
else: # Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0: # Child
try:
self.child_fd = sys.stdout.fileno() # used by setwinsize()
self.setwinsize(24, 80)
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range (3, max_fd):
try:
os.close (i)
except OSError:
pass
# I don't know why this works, but ignoring SIGHUP fixes a
# problem when trying to start a Java daemon with sudo
# (specifically, Tomcat).
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
"""This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
"""
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
pid = os.fork()
if pid < 0:
raise ExceptionPexpect, "Error! Failed os.fork()."
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
"""This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. """
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty if still connected.
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
os.setsid()
# Verify we are disconnected from controlling tty
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
raise ExceptionPexpect, "Error! We are not disconnected from a controlling tty."
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
        fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
else:
os.close(fd)
def fileno (self): # File-like object.
"""This returns the file descriptor of the pty for the child.
"""
return self.child_fd
def close (self, force=True): # File-like object.
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose) # Give kernel time to update process status.
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ('close() could not terminate the child using terminate()')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush (self): # File-like object.
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty (self): # File-like object.
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. """
return os.isatty(self.child_fd)
def waitnoecho (self, timeout=-1):
"""This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn ('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
        If timeout is None then this method will block forever until the ECHO
        flag is False.
"""
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho (self):
"""This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). """
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho (self, state):
"""This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.expect (['1234'])
p.expect (['1234'])
p.setecho(False) # Turn off tty echo
            p.sendline ('abcd') # We will see this only once (echoed by cat).
            p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['abcd'])
p.expect (['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.setecho(False) # Turn off tty echo
            p.sendline ('abcd') # We will see this only once (echoed by cat).
            p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['1234'])
p.expect (['1234'])
p.expect (['abcd'])
p.expect (['wxyz'])
"""
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
# and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def read_nonblocking (self, size = 1, timeout = -1):
"""This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a log file was set using
setlog() then all data will also be written to the log file.
If timeout is None then the read may block indefinitely. If timeout is -1
then the self.timeout value is used. If timeout is 0 then the child is
polled and if there was no data immediately ready then this will raise
a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
        character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. """
if self.closed:
raise ValueError ('I/O operation on closed file in read_nonblocking().')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
if not r:
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
elif self.__irix_hack:
# This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
# This adds a 2 second delay, but only when the child is terminated.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
r,w,e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their processes are alive;
# then timeout on the select; and then finally admit that they are not alive.
self.flag_eof = True
raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
else:
raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError, e: # Linux does this
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
if s == '': # BSD style
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
if self.logfile is not None:
self.logfile.write (s)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write (s)
self.logfile_read.flush()
return s
raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
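    # Hedged usage sketch (not part of the original source): with timeout=0
    # read_nonblocking() only polls, so a caller can drain whatever output is
    # already buffered without blocking:
    #
    #     try:
    #         chunk = child.read_nonblocking(size=1024, timeout=0)
    #     except TIMEOUT:
    #         chunk = ''  # nothing was ready right now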
def read (self, size = -1): # File-like object.
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return ''
if size < 0:
self.expect (self.delimiter) # delimiter default is EOF
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
        # I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
cre = re.compile('.{%d}' % size, re.DOTALL)
index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.after ### self.before should be ''. Should I assert this?
return self.before
def readline (self, size = -1): # File-like object.
"""This reads and returns one entire line. A trailing newline is kept
in the string, but may be absent when a file ends with an incomplete
line. Note: This readline() looks for a \\r\\n pair even on UNIX
because this is what the pseudo tty device returns. So contrary to what
you may expect you will receive the newline as \\r\\n. An empty string
is returned when EOF is hit immediately. Currently, the size argument is
mostly ignored, so this behavior is not standard for a file-like
object. If size is 0 then an empty string is returned. """
if size == 0:
return ''
index = self.expect (['\r\n', self.delimiter]) # delimiter default is EOF
if index == 0:
return self.before + '\r\n'
else:
return self.before
def __iter__ (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
return self
def next (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
result = self.readline()
if result == "":
raise StopIteration
return result
def readlines (self, sizehint = -1): # File-like object.
"""This reads until EOF using readline() and returns a list containing
the lines thus read. The optional "sizehint" argument is ignored. """
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s): # File-like object.
"""This is similar to send() except that there is no return value.
"""
self.send (s)
def writelines (self, sequence): # File-like object.
"""This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
        strings. This does not add line separators. There is no return value.
"""
for s in sequence:
self.write (s)
def send(self, s):
"""This sends a string to the child process. This returns the number of
bytes written. If a log file was set then the data is also written to
the log. """
time.sleep(self.delaybeforesend)
if self.logfile is not None:
self.logfile.write (s)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write (s)
self.logfile_send.flush()
c = os.write(self.child_fd, s)
return c
def sendline(self, s=''):
"""This is like send(), but it adds a line feed (os.linesep). This
returns the number of bytes written. """
n = self.send(s)
n = n + self.send (os.linesep)
return n
def sendcontrol(self, char):
"""This sends a control character to the child such as Ctrl-C or
Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
child.sendcontrol('g')
See also, sendintr() and sendeof().
"""
char = char.lower()
a = ord(char)
if a>=97 and a<=122:
a = a - ord('a') + 1
return self.send (chr(a))
d = {'@':0, '`':0,
'[':27, '{':27,
'\\':28, '|':28,
']':29, '}': 29,
'^':30, '~':30,
'_':31,
'?':127}
if char not in d:
return 0
return self.send (chr(d[char]))
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. """
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write (self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write (self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
else:
# platform does not define VEOF so assume CTRL-D
char = chr(4)
self.send(char)
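    # Hedged usage sketch (not part of the original source): sendeof() must
    # land at the beginning of a line, so flush any pending input with
    # sendline() first:
    #
    #     child = spawn('wc -l')
    #     child.sendline('one line of input')
    #     child.sendeof()            # tell wc that stdin is finished
    #     child.expect(EOF)
    #     print child.before         # wc's line count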
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
if hasattr(termios, 'VINTR'):
char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
else:
# platform does not define VINTR so assume CTRL-C
char = chr(3)
self.send (char)
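    # Hedged usage sketch (not part of the original source):
    #
    #     child = spawn('sleep 100')
    #     child.sendintr()           # deliver the tty interrupt character
    #     child.expect(EOF)          # the child exits on SIGINT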
def eof (self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. """
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError, e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
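    # Hedged usage sketch (not part of the original source): escalate to
    # SIGKILL only if the polite signals fail:
    #
    #     if not child.terminate():
    #         child.terminate(force=True)
    #     assert not child.isalive()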
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(); but, technically, the child
is still alive until its output is read. """
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect ('Cannot wait for dead child process.')
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return self.exitstatus
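    # Hedged usage sketch (not part of the original source): drain the child's
    # output before wait(), otherwise wait() can block forever on a child with
    # unread output:
    #
    #     child = spawn('/bin/ls')
    #     child.expect(EOF)          # read everything first
    #     status = child.wait()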
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. """
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form of waitpid to get
# status of a defunct process. This is super-lame. The flag_eof would have
# been set in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError, e: # No child processes
if e[0] == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# I have to do this twice for Solaris. I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process wishes to
# report, and the value of status is undefined.
if pid == 0:
try:
pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
except OSError, e: # This should never happen...
if e[0] == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# If pid is still 0 after two calls to waitpid() then
# the process really is alive. This seems to work on all platforms, except
# for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
# take care of this situation (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return False
def kill(self, sig):
"""This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. """
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def compile_pattern_list(self, patterns):
"""This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
        This avoids calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
               i = self.expect_list(cpl, timeout)
...
"""
if patterns is None:
return []
if type(patterns) is not types.ListType:
patterns = [patterns]
compile_flags = re.DOTALL # Allow dot to match \n
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for p in patterns:
if type(p) in types.StringTypes:
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif type(p) is type(re.compile('')):
compiled_pattern_list.append(p)
else:
                raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those types. %s' % str(type(p)))
return compiled_pattern_list
def expect(self, pattern, timeout = -1, searchwindowsize=None):
"""This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first match
in the stream is chosen. If more than one pattern matches at that point,
the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect (['bar', 'foo', 'foobar'])
# returns 1 ('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect (['foobar', 'foo'])
# returns 0 ('foobar') if all input is available at once,
            # but returns 1 ('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect (['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect (pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
"""
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT (which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. """
return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match."""
if type(pattern_list) in types.StringTypes or pattern_list in (TIMEOUT, EOF):
pattern_list = [pattern_list]
return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
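    # Hedged usage sketch (not part of the original source): because
    # expect_exact() matches literal strings, regex metacharacters such as '$'
    # or '(' need no escaping. Assuming a shell whose prompt ends in '$ ':
    #
    #     child = spawn('/bin/sh')
    #     child.expect_exact('$ ')   # a literal prompt, not a regex anchor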
def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
"""This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and what
to search for in the input.
See expect() for other arguments, return value and exceptions. """
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True: # Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end : ]
self.before = incoming[ : searcher.start]
self.after = incoming[searcher.start : searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if timeout < 0 and timeout is not None:
raise TIMEOUT ('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking (self.maxread, timeout)
freshlen = len(c)
time.sleep (0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF, e:
self.buffer = ''
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF (str(e) + '\n' + str(self))
except TIMEOUT, e:
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT (str(e) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, r, c):
"""This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. """
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
TIOCSWINSZ = -2146929561 # Same bits, but with sign.
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', r, c, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
"""This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
        simply echoes the child stdout and child stderr to the real stdout and
        it echoes the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
"""
# Flush the buffer.
self.stdout.write (self.buffer)
self.stdout.flush()
self.buffer = ''
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != '' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
"""This is used by the interact() method.
"""
return os.read(fd, 1000)
def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
"""This is used by the interact() method.
"""
while self.isalive():
r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
if output_filter: data = output_filter(data)
if self.logfile is not None:
self.logfile.write (data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter: data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select (self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error, e:
if e[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread (self, maxread):
"""This method is no longer supported or allowed. I don't like getters
and setters without a good reason. """
raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the maxread member variable.')
def setlog (self, fileobject):
"""This method is no longer supported or allowed.
"""
raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the logfile member variable.')
##############################################################################
# End of spawn class
##############################################################################
class searcher_string (object):
"""This is a plain string search helper for the spawn.expect_any() method.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
"""
def __init__(self, strings):
"""This creates an instance of searcher_string. This argument 'strings'
may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in zip(range(len(strings)), strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (ns[0],' %d: "%s"' % ns) for ns in self._strings ]
ss.append((-1,'searcher_string:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = zip(*ss)[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen+len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
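# Illustrative sketch, not part of the pexpect API (buffer contents and
# freshlen values below are assumptions): spawn.expect_any() calls search()
# incrementally, passing how many bytes at the end of the buffer are new
# since the previous call.
#
# >>> searcher = searcher_string(['OK', 'ERROR'])
# >>> searcher.search('boot... OK', 3)
# 0
# >>> (searcher.match, searcher.start, searcher.end)
# ('OK', 8, 10)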
class searcher_re (object):
"""This is regular expression string search helper for the
spawn.expect_any() method.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
"""
def __init__(self, patterns):
"""This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types."""
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in zip(range(len(patterns)), patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (n,' %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
ss.append((-1,'searcher_re:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = zip(*ss)[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer)-searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if n < first_match:
first_match = n
the_match = match
best_index = index
if first_match == absurd_match:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
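# Illustrative sketch (the compiled patterns and buffer are assumptions):
# same contract as searcher_string, but with pre-compiled regular
# expressions; 'match' is the re match object of the winning pattern.
#
# >>> searcher = searcher_re([re.compile('prompt[$#]'), re.compile('login:')])
# >>> searcher.search('...\nlogin:', 7)
# 1
# >>> searcher.match.group(0)
# 'login:'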
def which (filename):
"""This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None."""
# Special case where filename already contains a path.
if os.path.dirname(filename) != '':
if os.access (filename, os.X_OK):
return filename
if not os.environ.has_key('PATH') or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
# Oddly enough this was the one line that made Pexpect
# incompatible with Python 1.5.2.
#pathlist = p.split (os.pathsep)
pathlist = string.split (p, os.pathsep)
for path in pathlist:
f = os.path.join(path, filename)
if os.access(f, os.X_OK):
return f
return None
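# Illustrative usage sketch (binary names are assumptions; the result
# depends on the host's PATH):
#
# >>> which('sh')
# '/bin/sh'
# >>> which('no-such-binary') is None
# True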
def split_command_line(command_line):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded single quotes, double quotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
state_whitespace = 4 # The state of consuming whitespace between commands.
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\': # Escape the next character
state = state_esc
elif c == r"'": # Handle single quote
state = state_singlequote
elif c == r'"': # Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
pass # Do nothing.
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
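# Illustrative sketch (the command line below is an assumption): quotes and
# backslash escapes are honored, whitespace between arguments is dropped.
#
# >>> split_command_line(r'cp "my file.txt" backup\ dir')
# ['cp', 'my file.txt', 'backup dir']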
# vi:ts=4:sw=4:expandtab:ft=python:
| apache-2.0 | 2,595,335,226,405,041,700 | 40.162554 | 205 | 0.604142 | false |
google/llvm-propeller | lldb/test/API/lang/objc/objc-struct-argument/TestObjCStructArgument.py | 3 | 2329 | """Test passing structs to Objective-C methods."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestObjCStructArgument(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break inside main().
self.main_source = "test.m"
self.break_line = line_number(
self.main_source, '// Set breakpoint here.')
@skipUnlessDarwin
@add_test_categories(['pyapi'])
@skipIf(debug_info=no_match(["gmodules"]), oslist=['ios', 'watchos', 'tvos', 'bridgeos'], archs=['armv7', 'arm64']) # this test program only builds for ios with -gmodules
def test_with_python_api(self):
"""Test passing structs to Objective-C methods."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
bpt = target.BreakpointCreateByLocation(
self.main_source, self.break_line)
self.assertTrue(bpt, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
thread_list = lldbutil.get_threads_stopped_at_breakpoint(process, bpt)
# Make sure we stopped at the first breakpoint.
self.assertTrue(
len(thread_list) != 0,
"No thread stopped at our breakpoint.")
self.assertEquals(len(thread_list), 1,
"More than one thread stopped at our breakpoint.")
frame = thread_list[0].GetFrameAtIndex(0)
self.assertTrue(frame, "Got a valid frame 0 frame.")
self.expect("p [summer sumThings:tts]", substrs=['9'])
self.expect(
"po [NSValue valueWithRect:rect]",
substrs=['NSRect: {{0, 0}, {10, 20}}'])
# Now make sure we can call a method that returns a struct without
# crashing.
cmd_value = frame.EvaluateExpression("[provider getRange]")
self.assertTrue(cmd_value.IsValid())
| apache-2.0 | -3,360,934,878,902,984,700 | 34.287879 | 175 | 0.629025 | false |
sharifulgeo/networkx | networkx/algorithms/components/tests/test_semiconnected.py | 64 | 1901 | from itertools import chain
import networkx as nx
from nose.tools import *
class TestIsSemiconnected(object):
def test_undirected(self):
assert_raises(nx.NetworkXNotImplemented, nx.is_semiconnected,
nx.Graph())
assert_raises(nx.NetworkXNotImplemented, nx.is_semiconnected,
nx.MultiGraph())
def test_empty(self):
assert_raises(nx.NetworkXPointlessConcept, nx.is_semiconnected,
nx.DiGraph())
assert_raises(nx.NetworkXPointlessConcept, nx.is_semiconnected,
nx.MultiDiGraph())
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
ok_(nx.is_semiconnected(G))
def test_path(self):
G = nx.path_graph(100, create_using=nx.DiGraph())
ok_(nx.is_semiconnected(G))
G.add_edge(100, 99)
ok_(not nx.is_semiconnected(G))
def test_cycle(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
ok_(nx.is_semiconnected(G))
G = nx.path_graph(100, create_using=nx.DiGraph())
G.add_edge(0, 99)
ok_(nx.is_semiconnected(G))
def test_tree(self):
G = nx.DiGraph()
G.add_edges_from(chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)]
for i in range(100)))
ok_(not nx.is_semiconnected(G))
def test_dumbbell(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100))
ok_(not nx.is_semiconnected(G)) # G is disconnected.
G.add_edge(100, 99)
ok_(nx.is_semiconnected(G))
def test_alternating_path(self):
G = nx.DiGraph(chain.from_iterable([(i, i - 1), (i, i + 1)]
for i in range(0, 100, 2)))
ok_(not nx.is_semiconnected(G))
| bsd-3-clause | -6,045,707,382,820,668,000 | 34.867925 | 77 | 0.556549 | false |
oliviertilmans/ipmininet | ipmininet/host/config/named.py | 1 | 15108 | """Base classes to configure a Named daemon"""
import os
from typing import List, Union, Sequence, Optional
from ipaddress import IPv4Address, IPv6Address, ip_address
from mininet.log import lg
from ipmininet.overlay import Overlay
from ipmininet.utils import realIntfList, find_node, has_cmd
from ipmininet.router.config.utils import ConfigDict
from .base import HostDaemon
DNS_REFRESH = 86400
DNS_RETRY = 7200
DNS_EXPIRE = 3600000
DNS_MIN_TTL = 172800
class Named(HostDaemon):
NAME = 'named'
KILL_PATTERNS = (NAME,)
def __init__(self, node, **kwargs):
# Check if apparmor is enabled in the distribution
self.apparmor = has_cmd("aa-exec")
super().__init__(node, **kwargs)
@property
def startup_line(self):
# This runs the daemon outside of AppArmor's restrictions
return '{apparmor}{name} -c {cfg} -f -u root -p {port}' \
.format(apparmor="aa-exec -p unconfined " if self.apparmor else "",
name=self.NAME,
cfg=self.cfg_filename,
port=self.options.dns_server_port)
@property
def dry_run(self):
return '{name} {cfg}' \
.format(name='named-checkconf', cfg=self.cfg_filename)
def build(self):
cfg = super().build()
cfg.log_severity = self.options.log_severity
cfg.abs_logfile = os.path.abspath(cfg.logfile)
cfg.zones = ConfigDict()
for zone in self._node.get('dns_zones', []):
cfg.zones[self.zone_filename(zone.name)] = self.build_zone(zone)
self.build_reverse_zone(cfg.zones)
return cfg
def build_zone(self, zone: 'DNSZone') -> ConfigDict:
master_ips = []
for s_name in zone.servers + [zone.dns_master] + zone.dns_slaves:
server_itf = find_node(self._node, s_name)
if server_itf is None:
lg.error("Cannot find the server node {name} of DNS zone"
" {zone}. Are you sure that they are connected to "
"the current node {current}?"
.format(name=s_name, zone=zone.name,
current=self._node.name))
continue
server = server_itf.node
for itf in realIntfList(server):
for ip in itf.ips():
if ".arpa" not in zone.name: # Not a Reverse zone
zone.soa_record.add_record(ARecord(s_name,
ip.ip.compressed))
if s_name == zone.dns_master:
master_ips.append(ip.ip.compressed)
for ip6 in itf.ip6s(exclude_lls=True):
if ".arpa" not in zone.name: # Not a Reverse zone
zone.soa_record.add_record(
AAAARecord(s_name, ip6.ip.compressed))
if s_name == zone.dns_master:
master_ips.append(ip6.ip.compressed)
return ConfigDict(name=zone.soa_record.domain_name,
soa_record=zone.soa_record,
records=zone.soa_record.records,
master=self._node.name == zone.dns_master,
master_ips=master_ips)
def build_reverse_zone(self, cfg_zones: ConfigDict):
"""
Build non-existing PTR records. Then, adds them to an existing reverse
zone if any. The remaining ones are inserted in a new reverse zone
that is added to cfg_zones dictionary.
"""
# Build PTR records
ptr_records = []
for zone in cfg_zones.values():
for record in zone.soa_record.records:
if record.rtype != "A" and record.rtype != "AAAA":
continue
domain_name = record.domain_name if record.full_domain_name \
else record.domain_name + "." + zone.name
ptr_records.append(PTRRecord(record.address, domain_name,
ttl=record.ttl))
existing_records = [record for zone in cfg_zones.values()
for record in zone.soa_record.records
if record.rtype == "PTR"]
ptr_v6_records = []
ptr_v4_records = []
for record in ptr_records:
# Filter out existing PTR records
if record in existing_records:
continue
# Try to place the rest in existing reverse DNS zones
found = False
for zone in cfg_zones.values():
if zone.name in record.domain_name:
zone.soa_record.records.append(record)
found = True
break
# The rest needs a new DNS zone
if not found:
if record.v6:
ptr_v6_records.append(record)
else:
ptr_v4_records.append(record)
# Create new reverse DNS zones for remaining PTR records
if len(ptr_v6_records) > 0:
self.build_largest_reverse_zone(cfg_zones, ptr_v6_records)
if len(ptr_v4_records) > 0:
self.build_largest_reverse_zone(cfg_zones, ptr_v4_records)
def build_largest_reverse_zone(self, cfg_zones: ConfigDict,
records: List[Union['PTRRecord',
'NSRecord']]):
"""
Create the ConfigDict object representing a new reverse zone whose
prefix is the largest one that includes all the PTR records.
Then it adds it to the cfg_zones dict.
:param cfg_zones: The dict of ConfigDict representing existing zones
:param records: The list of PTR records to place a new reverse zone
"""
if len(records) == 0:
return
# Find common prefix between all records
common = records[0].domain_name.split(".")
for i in range(1, len(records)):
prefix = records[i].domain_name.split(".")
for j in range(1, len(common)):
if prefix[len(prefix)-j] != common[len(common)-j]:
common = common[len(common)+1-j:]  # keep the still-matching suffix
break
domain_name = ".".join(common)
# Retrieve the NS Record for the new zone
ns_record = None
for zone in cfg_zones.values():
if "arpa" in zone.name:
continue
for record in zone.soa_record.records:
if record.rtype == "NS" \
and self._node.name in record.name_server:
ns_record = NSRecord(record.domain_name, self._node.name)
ns_record.domain_name = domain_name
if ns_record is None:
lg.warning("Cannot forge a DNS reverse zone because there is no"
" NS Record for this node in regular zones.\n")
return
records.append(ns_record)
# Build the reverse zone
soa_record = SOARecord(domain_name=domain_name, records=records)
reverse_zone = ConfigDict(name=soa_record.domain_name,
soa_record=soa_record,
records=soa_record.records,
master=True,
master_ips=[])
self._node.params.setdefault('dns_zones', []).append(reverse_zone)
cfg_zones[self.zone_filename(reverse_zone.name)] = reverse_zone
def set_defaults(self, defaults):
""":param log_severity: It controls the logging levels and may take the
values defined. Logging will occur for any message equal to or
higher than the level specified (=>) lower levels will not be
logged. These levels are 'critical', 'error', 'warning',
'notice', 'info', 'debug' and 'dynamic'.
:param dns_server_port: The port number of the dns server"""
defaults.log_severity = "warning"
defaults.dns_server_port = 53
super().set_defaults(defaults)
def zone_filename(self, domain_name: str) -> str:
return self._file(suffix='%s.cfg' % domain_name)
@property
def cfg_filenames(self):
return super().cfg_filenames + \
[self.zone_filename(z.name)
for z in self._node.get('dns_zones', [])]
@property
def template_filenames(self):
return super().template_filenames + \
["%s-zone.mako" % self.NAME
for _ in self._node.get('dns_zones', [])]
class DNSRecord:
def __init__(self, rtype: str, domain_name: str, ttl=60):
self.rtype = rtype
self.domain_name = domain_name
self.ttl = ttl
if self.domain_name[-1] != "." and "." in self.domain_name:
# Full DNS names should be ended by a dot in the config
self.domain_name = self.domain_name + "."
@property
def rdata(self) -> str:
return ""
@property
def full_domain_name(self) -> bool:
return "." in self.domain_name
def __eq__(self, other):
return self.rtype == other.rtype \
and self.domain_name == other.domain_name \
and self.rdata == other.rdata
class ARecord(DNSRecord):
def __init__(self, domain_name,
address: Union[str, IPv4Address, IPv6Address], ttl=60):
self.address = ip_address(str(address))
rtype = "A" if self.address.version == 4 else "AAAA"
super().__init__(rtype=rtype, domain_name=domain_name, ttl=ttl)
@property
def rdata(self):
return self.address.compressed
class AAAARecord(ARecord):
pass # ARecord already handles IPv6 addresses
class PTRRecord(DNSRecord):
def __init__(self, address: Union[str, IPv4Address, IPv6Address],
domain_name: str, ttl=60):
self.address = ip_address(str(address))
self.mapped_domain_name = domain_name
if self.mapped_domain_name[-1] != "." \
and "." in self.mapped_domain_name:
# Full DNS names should be ended by a dot in the config
self.mapped_domain_name = self.mapped_domain_name + "."
super().__init__("PTR", self.address.reverse_pointer, ttl=ttl)
@property
def v6(self):
return self.address.version == 6
@property
def rdata(self):
return self.mapped_domain_name
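# Illustrative sketch (the address and host name are assumptions): the PTR
# domain name is derived from ip_address().reverse_pointer, and full names
# get a trailing dot appended.
#
# >>> rec = PTRRecord("192.0.2.1", "host1.example.org")
# >>> rec.domain_name
# '1.2.0.192.in-addr.arpa.'
# >>> rec.rdata
# 'host1.example.org.'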
class NSRecord(DNSRecord):
def __init__(self, domain_name, name_server: str, ttl=60):
super().__init__(rtype="NS", domain_name=domain_name, ttl=ttl)
self.name_server = name_server
if "." not in self.name_server:
self.name_server = self.name_server + "." + self.domain_name
if self.name_server[-1] != ".":
# Full DNS names should be ended by a dot in the config
self.name_server = self.name_server + "."
@property
def rdata(self):
return self.name_server
class SOARecord(DNSRecord):
def __init__(self, domain_name, refresh_time=DNS_REFRESH,
retry_time=DNS_RETRY, expire_time=DNS_EXPIRE,
min_ttl=DNS_MIN_TTL, records: Sequence[DNSRecord] = ()):
super().__init__(rtype="SOA", domain_name=domain_name, ttl=min_ttl)
self.refresh_time = refresh_time
self.retry_time = retry_time
self.expire_time = expire_time
self._records = list(records)
@property
def rdata(self):
return "{domain_name} sysadmin.{domain_name} (\n1 ; serial\n{refresh}" \
" ; refresh timer\n{retry} ; retry timer\n{expire}" \
" ; retry timer\n{min_ttl} ; minimum ttl\n)"\
.format(domain_name=self.domain_name, refresh=self.refresh_time,
retry=self.retry_time, expire=self.expire_time,
min_ttl=self.ttl)
@property
def records(self):
return self._records
def add_record(self, record: DNSRecord):
if record not in self._records:
self._records.append(record)
class DNSZone(Overlay):
def __init__(self, name: str, dns_master: str,
dns_slaves: Sequence[str] = (),
records: Sequence[DNSRecord] = (), nodes: Sequence[str] = (),
refresh_time=DNS_REFRESH, retry_time=DNS_RETRY,
expire_time=DNS_EXPIRE, min_ttl=DNS_MIN_TTL,
ns_domain_name: Optional[str] = None):
"""
:param name: The domain name of the zone
:param dns_master: The name of the master DNS server
:param dns_slaves: The list of names of DNS slaves
:param records: The list of DNS Records to be included in the zone
:param nodes: The list of nodes for which one A/AAAA record has to be
created for each of their IPv4/IPv6 addresses
:param refresh_time: The number of seconds before the zone should be
refreshed
:param retry_time: The number of seconds before a failed refresh should
be retried
:param expire_time: The upper limit in seconds before a zone is
considered no longer authoritative
:param min_ttl: The negative result TTL
:param ns_domain_name: If it is defined, it is the suffix of the domain
of the name servers, otherwise, parameter 'name'
is used.
"""
self.name = name
self.dns_master = dns_master
self.dns_slaves = list(dns_slaves)
self.records = records
self.servers = list(nodes)
self.soa_record = SOARecord(name, refresh_time=refresh_time,
retry_time=retry_time,
expire_time=expire_time, min_ttl=min_ttl,
records=records)
super().__init__(nodes=[dns_master] + list(dns_slaves))
self.consistent = True
for node_name in [dns_master] + self.dns_slaves + self.servers:
if "." in node_name:
lg.error("Cannot create zone {name} because the node name"
" {node_name} contains a '.'"
.format(name=name, node_name=node_name))
self.consistent = False
self.ns_domain_name = ns_domain_name if ns_domain_name is not None \
else self.name
def check_consistency(self, topo):
return super().check_consistency(topo) and self.consistent
def apply(self, topo):
super().apply(topo)
if not self.consistent:
return
# Add NS Records (if not already present)
for n in self.nodes:
self.soa_record.add_record(NSRecord(self.name,
n + "." + self.ns_domain_name))
for n in self.nodes:
topo.nodeInfo(n).setdefault("dns_zones", []).append(self)
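# Illustrative sketch (node names are assumptions, and it assumes the
# enclosing IPTopo exposes addOverlay): declaring a zone inside a topology
# build() method; one A/AAAA record is generated for each address of the
# listed servers.
#
# self.addOverlay(DNSZone(name="example.org", dns_master="dns1",
#                         dns_slaves=["dns2"], nodes=["h1", "h2"]))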
| gpl-2.0 | -5,009,223,093,324,447,000 | 38.34375 | 80 | 0.551893 | false |
a113n/bcbio-nextgen | bcbio/distributed/multi.py | 4 | 3761 | """Run tasks in parallel on a single machine using multiple cores.
"""
import functools
try:
import joblib
except ImportError:
joblib = False
from bcbio.distributed import resources
from bcbio.log import logger, setup_local_logging
from bcbio.pipeline import config_utils
from bcbio.provenance import diagnostics, system
def runner(parallel, config):
"""Run functions, provided by string name, on multiple cores on the current machine.
"""
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name)
logger.info("multiprocessing: %s" % fn_name)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel
def get_fn(fn_name, parallel):
taskmod = "multitasks"
imodule = parallel.get("module", "bcbio.distributed")
import_fn_name = parallel.get("wrapper", fn_name)
return getattr(__import__("{base}.{taskmod}".format(base=imodule, taskmod=taskmod),
fromlist=[taskmod]),
import_fn_name)
def zeromq_aware_logging(f):
"""Ensure multiprocessing logging uses ZeroMQ queues.
ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This
ensures the local logging uses existing ZeroMQ logging queues.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
config = None
for arg in args:
if config_utils.is_std_config_arg(arg):
config = arg
break
elif config_utils.is_nested_config_arg(arg):
config = arg["config"]
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]):
config = arg[0]["config"]
break
assert config, "Could not find config dictionary in function arguments."
if config.get("parallel", {}).get("log_queue") and not config.get("parallel", {}).get("wrapper"):
handler = setup_local_logging(config, config["parallel"])
else:
handler = None
try:
out = f(*args, **kwargs)
finally:
if handler and hasattr(handler, "close"):
handler.close()
return out
return wrapper
def run_multicore(fn, items, config, parallel=None):
"""Run the function using multiple cores on the given items to process.
"""
if len(items) == 0:
return []
if parallel is None or "num_jobs" not in parallel:
if parallel is None:
parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1)}
sysinfo = system.get_info({}, parallel)
parallel = resources.calculate(parallel, items, sysinfo, config,
parallel.get("multiplier", 1),
max_multicore=int(parallel.get("max_multicore", sysinfo["cores"])))
items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"]) for x in items]
if joblib is None:
raise ImportError("Need joblib for multiprocessing parallelization")
out = []
for data in joblib.Parallel(parallel["num_jobs"], batch_size=1, backend="multiprocessing")(joblib.delayed(fn)(*x) for x in items):
if data:
out.extend(data)
return out
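# Illustrative sketch (the task name, items and core count are assumptions):
# runner() returns a dispatcher that resolves string names inside
# bcbio.distributed.multitasks and fans the items out over local cores.
#
# run_parallel = runner({"type": "local", "cores": 4}, config)
# out = run_parallel("prepare_sample", [[data1], [data2]])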
| mit | 44,629,325,778,426,500 | 41.258427 | 134 | 0.608881 | false |
quizlet/grpc | tools/run_tests/python_utils/upload_test_results.py | 7 | 4239 | #!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to upload Jenkins test results to BQ"""
from __future__ import print_function
import os
import six
import sys
import time
import uuid
gcp_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
_DATASET_ID = 'jenkins_test_results'
_DESCRIPTION = 'Test results from master job run on Jenkins'
# 90 days in milliseconds
_EXPIRATION_MS = 90 * 24 * 60 * 60 * 1000
_PARTITION_TYPE = 'DAY'
_PROJECT_ID = 'grpc-testing'
_RESULTS_SCHEMA = [
('job_name', 'STRING', 'Name of Jenkins job'),
('build_id', 'INTEGER', 'Build ID of Jenkins job'),
('build_url', 'STRING', 'URL of Jenkins job'),
('test_name', 'STRING', 'Individual test name'),
('language', 'STRING', 'Language of test'),
('platform', 'STRING', 'Platform used for test'),
('config', 'STRING', 'Config used for test'),
('compiler', 'STRING', 'Compiler used for test'),
('iomgr_platform', 'STRING', 'Iomgr used for test'),
('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
('elapsed_time', 'FLOAT', 'How long test took to run'),
('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
('return_code', 'INTEGER', 'Exit code of test'),
]
def _get_build_metadata(test_results):
"""Add Jenkins/Kokoro build metadata to test_results based on environment
variables set by Jenkins/Kokoro.
"""
build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
if build_id:
test_results['build_id'] = build_id
if build_url:
test_results['build_url'] = build_url
if job_name:
test_results['job_name'] = job_name
def upload_results_to_bq(resultset, bq_table, args, platform):
"""Upload test results to a BQ table.
Args:
resultset: dictionary generated by jobset.run
bq_table: string name of table to create/upload results to in BQ
args: args in run_tests.py, generated by argparse
platform: string name of platform tests were run on
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
for shortname, results in six.iteritems(resultset):
for result in results:
test_results = {}
_get_build_metadata(test_results)
test_results['compiler'] = args.compiler
test_results['config'] = args.config
test_results['cpu_estimated'] = result.cpu_estimated
test_results['cpu_measured'] = result.cpu_measured
test_results['elapsed_time'] = '%.2f' % result.elapsed_time
test_results['iomgr_platform'] = args.iomgr_platform
# args.language is a list, but will always have one element in the contexts
# this function is used.
test_results['language'] = args.language[0]
test_results['platform'] = platform
test_results['result'] = result.state
test_results['return_code'] = result.returncode
test_results['test_name'] = shortname
test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
print('Error uploading result to bigquery.')
sys.exit(1)
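# Illustrative sketch (the table name and platform are assumptions): typical
# invocation from run_tests.py after a jobset run.
#
# upload_results_to_bq(resultset, 'test_results_20170101', args, 'linux')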
| apache-2.0 | 5,109,806,664,730,658,000 | 38.616822 | 113 | 0.674687 | false |
tcheehow/MissionPlanner | Lib/email/_parseaddr.py | 53 | 16241 | # Copyright (C) 2002-2007 Python Software Foundation
# Contact: [email protected]
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
# Check for a yy specified in two-digit format, then convert it to the
# appropriate four-digit format, according to the POSIX standard. RFC 822
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
# mandates a 4-digit yy. For more information, see the documentation for
# the time module.
if yy < 100:
# The year is between 1969 and 1999 (inclusive).
if yy > 68:
yy += 1900
# The year is between 2000 and 2068 (inclusive).
else:
yy += 2000
tzoffset = None
tz = tz.upper()
if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
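# Illustrative sketch (the date string is an assumption): parsedate_tz()
# returns a 10-tuple whose last item is the zone offset in seconds, and
# mktime_tz() folds that offset back in to produce a UTC timestamp.
#
# >>> parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0500')
# (2001, 11, 9, 1, 8, 47, 0, 1, -1, -18000)
# >>> mktime_tz(parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0500'))
# 1005286127.0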
def quote(str):
"""Prepare string to be used in a quoted string.
Turns backslash and double quote characters into quoted pairs. These
are the only characters that need to be quoted inside a quoted string.
Does not add the surrounding double quotes.
"""
return str.replace('\\', '\\\\').replace('"', '\\"')
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use rfc822.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Parse up to the start of the next address."""
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
if self.field[self.pos] == '.':
aslist.append('.')
self.pos += 1
elif self.field[self.pos] == '"':
aslist.append('"%s"' % quote(self.getquote()))
elif self.field[self.pos] in self.atomends:
break
else:
aslist.append(self.getatom())
self.gotonext()
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.FWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
if not x in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
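# Illustrative sketch (the header value is an assumption): parsing a
# To:-style header into (display-name, addr-spec) pairs.
#
# >>> AddressList('Jane Doe <[email protected]>, [email protected]').addresslist
# [('Jane Doe', '[email protected]'), ('', '[email protected]')]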
| gpl-3.0 | -7,104,405,601,707,556,000 | 30.678068 | 78 | 0.512961 | false |
Sjors/bitcoin | test/functional/wallet_backup.py | 22 | 8943 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. Nodes 1, 2 and 3 send transactions between each other;
the fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletBackupTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# nodes 1, 2 and 3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
self.sync_mempools()
self.nodes[3].generate(1)
self.sync_blocks()
# As above, this mirrors the original bash test.
def start_three(self, args=()):
self.start_node(0, self.extra_args[0] + list(args))
self.start_node(1, self.extra_args[1] + list(args))
self.start_node(2, self.extra_args[2] + list(args))
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
def init_three(self):
self.init_wallet(0)
self.init_wallet(1)
self.init_wallet(2)
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[1].generate(1)
self.sync_blocks()
self.nodes[2].generate(1)
self.sync_blocks()
self.nodes[3].generate(100)
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for _ in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
if not self.options.descriptors:
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for _ in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
self.log.info("Re-starting nodes")
self.start_three()
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if not self.options.descriptors:
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
self.start_three(["-nowallet"])
self.init_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
| mit | -367,623,148,411,312,060 | 37.882609 | 187 | 0.634686 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/test/test_error.py | 20 | 4619 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.internet import error
import socket
class TestStringification(unittest.TestCase):
"""Test that the exceptions have useful stringifications.
"""
listOfTests = [
#(output, exception[, args[, kwargs]]),
("An error occurred binding to an interface.",
error.BindError),
("An error occurred binding to an interface: foo.",
error.BindError, ['foo']),
("An error occurred binding to an interface: foo bar.",
error.BindError, ['foo', 'bar']),
("Couldn't listen on eth0:4242: Foo.",
error.CannotListenError,
('eth0', 4242, socket.error('Foo'))),
("Message is too long to send.",
error.MessageLengthError),
("Message is too long to send: foo bar.",
error.MessageLengthError, ['foo', 'bar']),
("DNS lookup failed.",
error.DNSLookupError),
("DNS lookup failed: foo bar.",
error.DNSLookupError, ['foo', 'bar']),
("An error occurred while connecting.",
error.ConnectError),
("An error occurred while connecting: someOsError.",
error.ConnectError, ['someOsError']),
("An error occurred while connecting: foo.",
error.ConnectError, [], {'string': 'foo'}),
("An error occurred while connecting: someOsError: foo.",
error.ConnectError, ['someOsError', 'foo']),
("Couldn't bind.",
error.ConnectBindError),
("Couldn't bind: someOsError.",
error.ConnectBindError, ['someOsError']),
("Couldn't bind: someOsError: foo.",
error.ConnectBindError, ['someOsError', 'foo']),
("Hostname couldn't be looked up.",
error.UnknownHostError),
("No route to host.",
error.NoRouteError),
("Connection was refused by other side.",
error.ConnectionRefusedError),
("TCP connection timed out.",
error.TCPTimedOutError),
("File used for UNIX socket is no good.",
error.BadFileError),
("Service name given as port is unknown.",
error.ServiceNameUnknownError),
("User aborted connection.",
error.UserError),
("User timeout caused connection failure.",
error.TimeoutError),
("An SSL error occurred.",
error.SSLError),
("Connection to the other side was lost in a non-clean fashion.",
error.ConnectionLost),
("Connection to the other side was lost in a non-clean fashion: foo bar.",
error.ConnectionLost, ['foo', 'bar']),
("Connection was closed cleanly.",
error.ConnectionDone),
("Connection was closed cleanly: foo bar.",
error.ConnectionDone, ['foo', 'bar']),
("Uh.", #TODO nice docstring, you've got there.
error.ConnectionFdescWentAway),
("Tried to cancel an already-called event.",
error.AlreadyCalled),
("Tried to cancel an already-called event: foo bar.",
error.AlreadyCalled, ['foo', 'bar']),
("Tried to cancel an already-cancelled event.",
error.AlreadyCancelled),
("A process has ended without apparent errors: process finished with exit code 0.",
error.ProcessDone,
[None]),
("A process has ended with a probable error condition: process ended.",
error.ProcessTerminated),
("A process has ended with a probable error condition: process ended with exit code 42.",
error.ProcessTerminated,
[],
{'exitCode': 42}),
("A process has ended with a probable error condition: process ended by signal SIGBUS.",
error.ProcessTerminated,
[],
{'signal': 'SIGBUS'}),
("The Connector was not connecting when it was asked to stop connecting.",
error.NotConnectingError),
("The Port was not listening when it was asked to stop listening.",
error.NotListeningError),
]
def testThemAll(self):
for entry in self.listOfTests:
output = entry[0]
exception = entry[1]
try:
args = entry[2]
except IndexError:
args = ()
try:
kwargs = entry[3]
except IndexError:
kwargs = {}
self.failUnlessEqual(
str(exception(*args, **kwargs)),
output)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -828,514,530,527,048,600 | 28.420382 | 97 | 0.579996 | false |
amyvmiwei/neon | neon/util/param.py | 4 | 1219 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Contains various functions for checking and setting required and optional
parameters.
"""
def req_param(obj, paramlist):
for param in paramlist:
if not hasattr(obj, param):
raise ValueError("req param %s missing for %s" % (param,
obj.__class__.__name__))
def opt_param(obj, paramlist, default_value=None):
for param in paramlist:
if not hasattr(obj, param):
setattr(obj, param, default_value)
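# Illustrative usage sketch (the object and attribute names are assumptions):
#
# >>> class Layer(object): pass
# >>> layer = Layer(); layer.nin = 128
# >>> req_param(layer, ['nin']) # passes: attribute present
# >>> opt_param(layer, ['bias_init'], default_value=0.0) # sets layer.bias_init
# >>> req_param(layer, ['nout']) # raises ValueError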
| apache-2.0 | 4,460,993,313,797,256,000 | 38.322581 | 78 | 0.606235 | false |
tysonclugg/django | tests/template_loader/tests.py | 153 | 7435 | from django.template import TemplateDoesNotExist
from django.template.loader import (
get_template, render_to_string, select_template,
)
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
}])
class TemplateLoaderTests(SimpleTestCase):
def test_get_template_first_engine(self):
template = get_template("template_loader/hello.html")
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_get_template_second_engine(self):
template = get_template("template_loader/goodbye.html")
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_get_template_using_engine(self):
template = get_template("template_loader/hello.html", using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_get_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
get_template("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_select_template_first_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/hello.html"])
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_select_template_second_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/goodbye.html"])
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_select_template_using_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/hello.html"], using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_select_template_empty(self):
with self.assertRaises(TemplateDoesNotExist):
select_template([])
def test_select_template_string(self):
with self.assertRaisesMessage(
TypeError,
"select_template() takes an iterable of template names but got a "
"string: 'template_loader/hello.html'. Use get_template() if you "
"want to load a single template by name."
):
select_template('template_loader/hello.html')
def test_select_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
select_template(["template_loader/unknown.html",
"template_loader/missing.html"])
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[0].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_select_template_tries_all_engines_before_names(self):
template = select_template(["template_loader/goodbye.html",
"template_loader/hello.html"])
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_render_to_string_first_engine(self):
content = render_to_string("template_loader/hello.html")
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_second_engine(self):
content = render_to_string("template_loader/goodbye.html")
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_request(self):
request = RequestFactory().get('/foobar/')
content = render_to_string("template_loader/request.html", request=request)
self.assertEqual(content, "/foobar/\n")
def test_render_to_string_using_engine(self):
content = render_to_string("template_loader/hello.html", using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_render_to_string_with_list_first_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/hello.html"])
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_with_list_second_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/goodbye.html"])
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_list_using_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/hello.html"], using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_with_list_empty(self):
with self.assertRaises(TemplateDoesNotExist):
render_to_string([])
def test_render_to_string_with_list_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string(["template_loader/unknown.html",
"template_loader/missing.html"])
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[0].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[1].backend.name, 'django')
self.assertEqual(
e.exception.chain[2].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[2].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[3].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[3].backend.name, 'django')
def test_render_to_string_with_list_tries_all_engines_before_names(self):
content = render_to_string(["template_loader/goodbye.html",
"template_loader/hello.html"])
self.assertEqual(content, "Goodbye! (Django templates)\n")
| bsd-3-clause | -6,058,954,020,228,486,000 | 43.255952 | 83 | 0.628245 | false |
wearpants/osf.io | website/addons/box/tests/test_serializer.py | 11 | 1096 | # -*- coding: utf-8 -*-
"""Serializer tests for the Box addon."""
import mock
from nose.tools import * # noqa (PEP8 asserts)
from website.addons.base.testing.serializers import StorageAddonSerializerTestSuiteMixin
from website.addons.box.tests.utils import MockBox
from website.addons.box.tests.factories import BoxAccountFactory
from website.addons.box.serializer import BoxSerializer
from tests.base import OsfTestCase
mock_client = MockBox()
class TestBoxSerializer(StorageAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'box'
Serializer = BoxSerializer
ExternalAccountFactory = BoxAccountFactory
client = mock_client
def setUp(self):
self.mock_valid = mock.patch.object(
BoxSerializer,
'credentials_are_valid',
return_value=True
)
self.mock_valid.start()
super(TestBoxSerializer, self).setUp()
def tearDown(self):
self.mock_valid.stop()
super(TestBoxSerializer, self).tearDown()
def set_provider_id(self, pid):
self.node_settings.folder_id = pid
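# --- Illustrative sketch (not part of the test suite) ---
# setUp()/tearDown() above use the start()/stop() form of mock.patch.object so
# the patch stays active across every test method. The standalone demo below
# shows the same pattern against a throwaway class; the class and method names
# are invented purely for illustration.
if __name__ == '__main__':
    import mock

    class Validator(object):
        def credentials_are_valid(self):
            return False

    patcher = mock.patch.object(Validator, 'credentials_are_valid',
                                return_value=True)
    patcher.start()
    try:
        print(Validator().credentials_are_valid())  # True while patched
    finally:
        patcher.stop()
    print(Validator().credentials_are_valid())  # False once unpatched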
| apache-2.0 | -2,965,500,493,141,645,300 | 28.621622 | 88 | 0.708029 | false |
TorpedoXL/namebench | libnamebench/nameserver_list.py | 173 | 24485 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to work with bunches of nameservers."""
__author__ = '[email protected] (Thomas Stromberg)'
import datetime
import operator
import Queue
import random
import sys
import threading
import time
# 3rd party libraries
import dns.resolver
import conn_quality
import addr_util
import nameserver
import util
NS_CACHE_SLACK = 2
CACHE_VER = 4
PREFERRED_HEALTH_TIMEOUT_MULTIPLIER = 1.5
SYSTEM_HEALTH_TIMEOUT_MULTIPLIER = 2
TOO_DISTANT_MULTIPLIER = 4.75
DEFAULT_MAX_SERVERS_TO_CHECK = 350
# If we can't ping more than this percentage of servers, go into slow mode.
MIN_PINGABLE_PERCENT = 5
MIN_HEALTHY_PERCENT = 10
SLOW_MODE_THREAD_COUNT = 6
# Windows behaves in unfortunate ways if too many threads are specified
DEFAULT_THREAD_COUNT = 35
MAX_INITIAL_HEALTH_THREAD_COUNT = 35
class OutgoingUdpInterception(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TooFewNameservers(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ThreadFailure(Exception):
def __init__(self):
pass
class QueryThreads(threading.Thread):
"""Quickly see which nameservers are awake."""
def __init__(self, input_queue, results_queue, action_type, checks=None):
threading.Thread.__init__(self)
self.input = input_queue
self.action_type = action_type
self.results = results_queue
self.checks = checks
self.halt = False
def stop(self):
self.halt = True
def run(self):
"""Iterate over the queue, processing each item."""
while not self.halt and not self.input.empty():
# check_wildcards is special: it has a tuple of two nameservers
if self.action_type == 'wildcard_check':
try:
(ns, other_ns) = self.input.get_nowait()
except Queue.Empty:
return
if ns.is_disabled or other_ns.is_disabled:
self.results.put(None)
continue
else:
self.results.put((ns, ns.TestSharedCache(other_ns)))
# everything else only has a single nameserver.
else:
try:
ns = self.input.get_nowait()
except Queue.Empty:
return
if ns.is_disabled:
self.results.put(None)
continue
if self.action_type == 'ping':
self.results.put(ns.CheckHealth(fast_check=True))
elif self.action_type == 'health':
self.results.put(ns.CheckHealth(sanity_checks=self.checks))
elif self.action_type == 'final':
self.results.put(ns.CheckHealth(sanity_checks=self.checks, final_check=True))
elif self.action_type == 'port_behavior':
self.results.put(ns.CheckHealth(sanity_checks=self.checks, port_check=True))
elif self.action_type == 'censorship':
self.results.put(ns.CheckCensorship(self.checks))
elif self.action_type == 'store_wildcards':
self.results.put(ns.StoreWildcardCache())
elif self.action_type == 'node_id':
self.results.put(ns.UpdateNodeIds())
elif self.action_type == 'update_hostname':
self.results.put(ns.UpdateHostname())
else:
raise ValueError('Invalid action type: %s' % self.action_type)
class NameServers(list):
def __init__(self, thread_count=DEFAULT_THREAD_COUNT, max_servers_to_check=DEFAULT_MAX_SERVERS_TO_CHECK):
self._ips = set()
self.thread_count = thread_count
super(NameServers, self).__init__()
self.client_latitude = None
self.client_longitude = None
self.client_country = None
self.client_domain = None
self.client_asn = None
self.max_servers_to_check = max_servers_to_check
@property
def visible_servers(self):
return [x for x in self if not x.is_hidden]
@property
def enabled_servers(self):
return [x for x in self.visible_servers if not x.is_disabled]
@property
def disabled_servers(self):
return [x for x in self.visible_servers if x.is_disabled]
@property
def enabled_keepers(self):
return [x for x in self.enabled_servers if x.is_keeper]
@property
def enabled_supplemental(self):
return [x for x in self.enabled_servers if not x.is_keeper]
@property
def supplemental_servers(self):
return [x for x in self if not x.is_keeper]
@property
def country_servers(self):
return [x for x in self if x.country_code == self.client_country]
# Return a list of servers that match a particular tag
def HasTag(self, tag):
return [x for x in self if x.HasTag(tag)]
# Return a list of servers that match a particular tag
def HasVisibleTag(self, tag):
return [x for x in self.visible_servers if x.HasTag(tag)]
def SortEnabledByFastest(self):
"""Return a list of healthy servers in fastest-first order."""
return sorted(self.enabled_servers, key=operator.attrgetter('check_average'))
def SortEnabledByNearest(self):
"""Return a list of healthy servers in fastest-first order."""
return sorted(self.enabled_servers, key=operator.attrgetter('fastest_check_duration'))
def msg(self, msg, count=None, total=None, **kwargs):
if self.status_callback:
self.status_callback(msg, count=count, total=total, **kwargs)
else:
print '%s [%s/%s]' % (msg, count, total)
def _GetObjectForIP(self, ip):
return [x for x in self if x.ip == ip][0]
def _MergeNameServerData(self, ns):
existing = self._GetObjectForIP(ns.ip)
existing.tags.update(ns.tags)
if ns.system_position is not None:
existing.system_position = ns.system_position
elif ns.dhcp_position is not None:
existing.dhcp_position = ns.dhcp_position
def append(self, ns):
"""Add a nameserver to the list, guaranteeing uniqueness."""
if ns.ip in self._ips:
self._MergeNameServerData(ns)
else:
super(NameServers, self).append(ns)
self._ips.add(ns.ip)
def SetTimeouts(self, timeout, ping_timeout, health_timeout):
if len(self.enabled_servers) > 1:
cq = conn_quality.ConnectionQuality(status_callback=self.status_callback)
(intercepted, avg_latency, max_latency) = cq.CheckConnectionQuality()[0:3]
if intercepted:
raise OutgoingUdpInterception(
'Your router or Internet Service Provider appears to be intercepting '
'and redirecting all outgoing DNS requests. This means you cannot '
'benchmark or utilize alternate DNS servers. Please adjust your '
'router configuration or file a support request with your ISP.'
)
if (max_latency * 2) > health_timeout:
health_timeout = max_latency * 2
self.msg('Set health timeout to %.2fs' % health_timeout)
if (max_latency * 1.1) > ping_timeout:
ping_timeout = avg_latency * 1.4
self.msg('Set ping timeout to %.2fs' % ping_timeout)
for ns in self:
ns.timeout = timeout
ns.ping_timeout = ping_timeout
ns.health_timeout = health_timeout
def SetClientLocation(self, latitude, longitude, client_country):
self.client_latitude = latitude
self.client_longitude = longitude
self.client_country = client_country
def SetNetworkLocation(self, domain, asn):
self.client_domain = domain
self.client_asn = asn
def FilterByTag(self, include_tags=None, require_tags=None):
for ns in self:
if include_tags:
if not ns.MatchesTags(include_tags):
ns.tags.add('hidden')
if require_tags:
for tag in require_tags:
if not ns.HasTag(tag):
ns.tags.add('hidden')
if not self.enabled_servers:
raise TooFewNameservers('No nameservers specified matched tags %s %s' % (include_tags, require_tags))
if require_tags:
self.msg("%s of %s nameservers have tags: %s (%s required)" %
(len(self.visible_servers), len(self), ', '.join(include_tags),
', '.join(require_tags)))
else:
self.msg("%s of %s nameservers have tags: %s" %
(len(self.visible_servers), len(self), ', '.join(include_tags)))
  def HasEnoughInCountryServers(self):
    return len(self.country_servers) > self.max_servers_to_check
def NearbyServers(self, max_distance):
srv_by_dist = sorted([(x.DistanceFromCoordinates(self.client_latitude, self.client_longitude), x)
for x in self.HasVisibleTag('regional')], key=operator.itemgetter(0))
for distance, ns in srv_by_dist:
if distance < float(max_distance):
yield ns
def AddNetworkTags(self):
"""Add network tags for each nameserver."""
if self.client_domain:
provider = self.client_domain.split('.')[0]
else:
provider = None
for ns in self:
ns.AddNetworkTags(self.client_domain, provider, self.client_asn, self.client_country)
def AddLocalityTags(self, max_distance):
if self.client_latitude:
count = 0
for ns in self.NearbyServers(max_distance):
count += 1
if count > self.max_servers_to_check:
break
ns.tags.add('nearby')
def DisableSlowestSupplementalServers(self, multiplier=TOO_DISTANT_MULTIPLIER, max_servers=None,
prefer_asn=None):
"""Disable servers who's fastest duration is multiplier * average of best 10 servers."""
if not max_servers:
max_servers = self.max_servers_to_check
supplemental_servers = self.enabled_supplemental
    fastest = self.SortEnabledByFastest()[:10]
best_10 = util.CalculateListAverage([x.fastest_check_duration for x in fastest])
cutoff = best_10 * multiplier
self.msg("Removing secondary nameservers slower than %0.2fms (max=%s)" % (cutoff, max_servers))
for (idx, ns) in enumerate(self.SortEnabledByFastest()):
hide = False
if ns not in supplemental_servers:
continue
if ns.fastest_check_duration > cutoff:
hide = True
if idx > max_servers:
hide = True
if hide:
matches = ns.MatchesTags(nameserver.PROVIDER_TAGS)
if matches:
self.msg("%s seems slow, but has tag: %s" % (ns, matches))
else:
ns.tags.add('hidden')
def _FastestByLocalProvider(self):
"""Find the fastest DNS server by the client provider."""
fastest = self.SortEnabledByFastest()
# Give preference in tag order
for tag in nameserver.PROVIDER_TAGS:
for ns in fastest:
if ns.HasTag(tag):
return ns
def HideBrokenIPV6Servers(self):
"""Most people don't care about these."""
for ns in self.disabled_servers:
if ns.HasTag('ipv6') and not ns.is_hidden:
ns.tags.add('hidden')
def HideSlowSupplementalServers(self, target_count):
"""Given a target count, delete nameservers that we do not plan to test."""
# Magic secondary mixing algorithm:
# - Half of them should be the "nearest" nameservers
# - Half of them should be the "fastest average" nameservers
self.msg("Hiding all but %s servers" % target_count)
keepers = self.enabled_keepers
isp_keeper = self._FastestByLocalProvider()
if isp_keeper:
self.msg("%s is the fastest DNS server provided by your ISP." % isp_keeper)
keepers.append(isp_keeper)
supplemental_servers_needed = target_count - len(keepers)
if supplemental_servers_needed < 1 or not self.enabled_supplemental:
return
nearest_needed = int(supplemental_servers_needed / 2.0)
if supplemental_servers_needed < 50:
self.msg("Picking %s secondary servers to use (%s nearest, %s fastest)" %
(supplemental_servers_needed, nearest_needed, supplemental_servers_needed - nearest_needed))
# Phase two is picking the nearest secondary server
supplemental_servers_to_keep = []
for ns in self.SortEnabledByNearest():
if ns not in keepers:
if not supplemental_servers_to_keep and supplemental_servers_needed < 15:
self.msg('%s appears to be the nearest regional (%0.2fms)' % (ns, ns.fastest_check_duration))
supplemental_servers_to_keep.append(ns)
if len(supplemental_servers_to_keep) >= nearest_needed:
break
# Phase three is hiding the slower secondary servers
for ns in self.SortEnabledByFastest():
if ns not in keepers and ns not in supplemental_servers_to_keep:
supplemental_servers_to_keep.append(ns)
if len(supplemental_servers_to_keep) >= supplemental_servers_needed:
break
for ns in self.supplemental_servers:
if ns not in supplemental_servers_to_keep and ns not in keepers:
ns.tags.add('hidden')
def CheckHealth(self, sanity_checks=None, max_servers=11, prefer_asn=None):
"""Filter out unhealthy or slow replica servers."""
self.PingNameServers()
if len(self.enabled_servers) > max_servers:
self.DisableSlowestSupplementalServers(prefer_asn=prefer_asn)
self.RunHealthCheckThreads(sanity_checks['primary'])
if len(self.enabled_servers) > max_servers:
self._DemoteSecondaryGlobalNameServers()
self.HideSlowSupplementalServers(int(max_servers * NS_CACHE_SLACK))
if len(self.enabled_servers) > 1:
self.RunNodeIdThreads()
self.CheckCacheCollusion()
self.RunNodeIdThreads()
self.HideSlowSupplementalServers(max_servers)
self.RunFinalHealthCheckThreads(sanity_checks['secondary'])
self.RunNodeIdThreads()
self.HideBrokenIPV6Servers()
# One more time!
if len(self.enabled_servers) > 1:
self.RunNodeIdThreads()
self.CheckCacheCollusion()
self.RunHostnameThreads()
if not self.enabled_servers:
raise TooFewNameservers('None of the nameservers tested are healthy')
def CheckCensorship(self, sanity_checks):
pass
def _RemoveGlobalWarnings(self):
"""If all nameservers have the same warning, remove it. It's likely false."""
ns_count = len(self.enabled_servers)
seen_counts = {}
# No sense in checking for duplicate warnings if we only have one server.
if len(self.enabled_servers) == 1:
return
for ns in self.enabled_servers:
for warning in ns.warnings:
seen_counts[warning] = seen_counts.get(warning, 0) + 1
for warning in seen_counts:
if seen_counts[warning] == ns_count:
self.msg('All nameservers have warning: %s (likely a false positive)' % warning)
for ns in self.enabled_servers:
ns.warnings.remove(warning)
def _DemoteSecondaryGlobalNameServers(self):
"""For global nameservers, demote the slower IP to secondary status."""
seen = {}
for ns in self.SortEnabledByFastest():
if ns.MatchesTags(['preferred', 'global']):
if ns.provider in seen and not ns.MatchesTags(['system', 'specified']):
faster_ns = seen[ns.provider]
if ns.HasTag('preferred'):
self.msg('Making %s the primary anycast - faster than %s by %2.2fms' %
(faster_ns.name_and_node, ns.name_and_node, ns.check_average - faster_ns.check_average))
ns.tags.add('hidden')
else:
seen[ns.provider] = ns
def ResetTestResults(self):
"""Reset the testng status of all disabled hosts."""
return [ns.ResetTestStatus() for ns in self]
def CheckCacheCollusion(self):
"""Mark if any nameservers share cache, especially if they are slower."""
self.RunWildcardStoreThreads()
sleepy_time = 4
self.msg("Waiting %ss for TTL's to decrement." % sleepy_time)
time.sleep(sleepy_time)
test_combos = []
    good_nameservers = self.SortEnabledByFastest()
for ns in good_nameservers:
for compare_ns in good_nameservers:
if ns != compare_ns:
test_combos.append((compare_ns, ns))
results = self.RunCacheCollusionThreads(test_combos)
while not results.empty():
(ns, shared_ns) = results.get()
if shared_ns:
ns.shared_with.add(shared_ns)
shared_ns.shared_with.add(ns)
if ns.is_disabled or shared_ns.is_disabled:
continue
if ns.check_average > shared_ns.check_average:
slower = ns
faster = shared_ns
else:
slower = shared_ns
faster = ns
if slower.system_position == 0:
faster.DisableWithMessage('Shares-cache with current primary DNS server')
slower.warnings.add('Replica of %s' % faster.ip)
elif slower.is_keeper and not faster.is_keeper:
faster.DisableWithMessage('Replica of %s [%s]' % (slower.name, slower.ip))
slower.warnings.add('Replica of %s [%s]' % (faster.name, faster.ip))
else:
diff = slower.check_average - faster.check_average
self.msg("Disabling %s - slower replica of %s by %0.1fms." % (slower.name_and_node, faster.name_and_node, diff))
slower.DisableWithMessage('Slower replica of %s [%s]' % (faster.name, faster.ip))
faster.warnings.add('Replica of %s [%s]' % (slower.name, slower.ip))
def _LaunchQueryThreads(self, action_type, status_message, items,
thread_count=None, **kwargs):
"""Launch query threads for a given action type.
Args:
      action_type: a string naming the action for QueryThreads to perform
status_message: Status to show during updates.
items: A list of items to pass to the queue
thread_count: How many threads to use (int)
kwargs: Arguments to pass to QueryThreads()
Returns:
results_queue: Results from the query tests.
Raises:
TooFewNameservers: If no tested nameservers are healthy.
"""
threads = []
input_queue = Queue.Queue()
results_queue = Queue.Queue()
# items are usually nameservers
random.shuffle(items)
for item in items:
input_queue.put(item)
if not thread_count:
thread_count = self.thread_count
if thread_count > len(items):
thread_count = len(items)
status_message += ' (%s threads)' % thread_count
self.msg(status_message, count=0, total=len(items))
for _ in range(0, thread_count):
thread = QueryThreads(input_queue, results_queue, action_type, **kwargs)
try:
thread.start()
except:
self.msg("ThreadingError with %s threads: waiting for completion before retrying." % thread_count)
for thread in threads:
thread.stop()
thread.join()
raise ThreadFailure()
threads.append(thread)
while results_queue.qsize() != len(items):
self.msg(status_message, count=results_queue.qsize(), total=len(items))
time.sleep(0.5)
self.msg(status_message, count=results_queue.qsize(), total=len(items))
for thread in threads:
thread.join()
if not self.enabled_servers:
raise TooFewNameservers('None of the %s nameservers tested are healthy' % len(self.visible_servers))
return results_queue
def RunCacheCollusionThreads(self, test_combos):
"""Schedule and manage threading for cache collusion checks."""
return self._LaunchQueryThreads('wildcard_check', 'Running cache-sharing checks on %s servers' % len(self.enabled_servers), test_combos)
def PingNameServers(self):
"""Quickly ping nameservers to see which are available."""
start = datetime.datetime.now()
test_servers = list(self.enabled_servers)
try:
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
except ThreadFailure:
self.msg("It looks like you couldn't handle %s threads, trying again with %s (slow)" % (self.thread_count, SLOW_MODE_THREAD_COUNT))
self.thread_count = SLOW_MODE_THREAD_COUNT
self.ResetTestResults()
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
success_rate = self.GetHealthyPercentage(compare_to=test_servers)
if success_rate < MIN_PINGABLE_PERCENT:
self.msg('How odd! Only %0.1f percent of name servers were pingable. Trying again with %s threads (slow)'
% (success_rate, SLOW_MODE_THREAD_COUNT))
self.ResetTestResults()
self.thread_count = SLOW_MODE_THREAD_COUNT
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', test_servers)
if self.enabled_servers:
self.msg('%s of %s servers are available (duration: %s)' %
(len(self.enabled_servers), len(test_servers), datetime.datetime.now() - start))
return results
def GetHealthyPercentage(self, compare_to=None):
if not compare_to:
compare_to = self.visible_servers
return (float(len(self.enabled_servers)) / float(len(compare_to))) * 100
def RunHealthCheckThreads(self, checks, min_healthy_percent=MIN_HEALTHY_PERCENT):
"""Quickly ping nameservers to see which are healthy."""
test_servers = self.enabled_servers
status_msg = 'Running initial health checks on %s servers' % len(test_servers)
if self.thread_count > MAX_INITIAL_HEALTH_THREAD_COUNT:
thread_count = MAX_INITIAL_HEALTH_THREAD_COUNT
else:
thread_count = self.thread_count
try:
results = self._LaunchQueryThreads('health', status_msg, test_servers,
checks=checks, thread_count=thread_count)
except ThreadFailure:
self.msg("It looks like you couldn't handle %s threads, trying again with %s (slow)" % (thread_count, SLOW_MODE_THREAD_COUNT))
self.thread_count = SLOW_MODE_THREAD_COUNT
self.ResetTestResults()
results = self._LaunchQueryThreads('ping', 'Checking nameserver availability', list(self.visible_servers))
success_rate = self.GetHealthyPercentage(compare_to=test_servers)
if success_rate < min_healthy_percent:
self.msg('How odd! Only %0.1f percent of name servers are healthy. Trying again with %s threads (slow)'
% (success_rate, SLOW_MODE_THREAD_COUNT))
self.ResetTestResults()
self.thread_count = SLOW_MODE_THREAD_COUNT
time.sleep(5)
results = self._LaunchQueryThreads('health', status_msg, test_servers,
checks=checks, thread_count=thread_count)
self.msg('%s of %s tested name servers are healthy' %
(len(self.enabled_servers), len(test_servers)))
return results
def RunNodeIdThreads(self):
"""Update node id status on all servers."""
status_msg = 'Checking node ids on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('node_id', status_msg, list(self.enabled_servers))
def RunHostnameThreads(self):
"""Update node id status on all servers."""
status_msg = 'Updating hostnames on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('update_hostname', status_msg, list(self.enabled_servers))
def RunFinalHealthCheckThreads(self, checks):
"""Quickly ping nameservers to see which are healthy."""
status_msg = 'Running final health checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('final', status_msg, list(self.enabled_servers), checks=checks)
def RunCensorshipCheckThreads(self, checks):
"""Quickly ping nameservers to see which are healthy."""
status_msg = 'Running censorship checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('censorship', status_msg, list(self.enabled_servers), checks=checks)
def RunPortBehaviorThreads(self):
"""Get port behavior data."""
status_msg = 'Running port behavior checks on %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('port_behavior', status_msg, list(self.enabled_servers))
def RunWildcardStoreThreads(self):
"""Store a wildcard cache value for all nameservers (using threads)."""
status_msg = 'Waiting for wildcard cache queries from %s servers' % len(self.enabled_servers)
return self._LaunchQueryThreads('store_wildcards', status_msg, list(self.enabled_servers))
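# --- Illustrative sketch (not part of namebench) ---
# _LaunchQueryThreads() is a plain producer/consumer pool: shuffle work items
# into an input queue, start N QueryThreads, then poll the results queue until
# every item has an answer. The self-contained miniature below reproduces that
# shape with a trivial stand-in action so it can run anywhere under Python 2.
if __name__ == '__main__':
  import threading

  work = Queue.Queue()
  results = Queue.Queue()
  for item in range(20):
    work.put(item)

  def worker():
    while True:
      try:
        item = work.get_nowait()
      except Queue.Empty:
        return
      results.put(item * item)  # stand-in for a health check

  pool = [threading.Thread(target=worker) for _ in range(4)]
  for t in pool:
    t.start()
  for t in pool:
    t.join()
  print('collected %d results' % results.qsize())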
| apache-2.0 | 7,145,423,561,848,634,000 | 36.211246 | 140 | 0.669349 | false |
yesbox/ansible | test/units/module_utils/basic/test_known_hosts.py | 74 | 2365 | # -*- coding: utf-8 -*-
# (c) 2015, Michael Scherer <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from ansible.module_utils import known_hosts
class TestAnsibleModuleKnownHosts(unittest.TestCase):
urls = {
'ssh://one.example.org/example.git':
{'is_ssh_url': True, 'get_fqdn': 'one.example.org'},
'ssh+git://two.example.org/example.git':
{'is_ssh_url': True, 'get_fqdn': 'two.example.org'},
'rsync://three.example.org/user/example.git':
{'is_ssh_url': False, 'get_fqdn': 'three.example.org'},
'[email protected]:user/example.git':
{'is_ssh_url': True, 'get_fqdn': 'four.example.org'},
'git+ssh://five.example.org/example.git':
{'is_ssh_url': True, 'get_fqdn': 'five.example.org'},
'ssh://six.example.org:21/example.org':
{'is_ssh_url': True, 'get_fqdn': 'six.example.org'},
'ssh://[2001:DB8::abcd:abcd]/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
'ssh://[2001:DB8::abcd:abcd]:22/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
'username@[2001:DB8::abcd:abcd]/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
'username@[2001:DB8::abcd:abcd]:22/example.git':
{'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
}
def test_is_ssh_url(self):
for u in self.urls:
self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url'])
def test_get_fqdn(self):
for u in self.urls:
self.assertEqual(known_hosts.get_fqdn(u), self.urls[u]['get_fqdn'])
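# --- Illustrative sketch (not part of the test suite) ---
# The helpers under test can also be exercised directly; the URL below is one
# of the fixtures from the table above.
if __name__ == '__main__':
    url = '[email protected]:user/example.git'
    print(known_hosts.is_ssh_url(url))  # True
    print(known_hosts.get_fqdn(url))    # four.example.org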
| gpl-3.0 | 3,376,311,801,116,542,000 | 42 | 83 | 0.621987 | false |
NYU-DevOps-Fall2017-PromotionsTeam/promotions | features/steps/promotion_steps.py | 1 | 7975 | from os import getenv
import requests
from behave import *
import json
from app import server
from verify import expect
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
WAIT_SECONDS = 30
BASE_URL = getenv('BASE_URL', 'http://localhost:5001/')
#BASE_URL = getenv('BASE_URL', '/')
#########################################
# GIVEN STATEMENTS #
#########################################
@given(u'the following promotions')
def step_impl(context):
server.data_reset()
for row in context.table:
server.data_load(
row['id'],
{
"name": row['name'],
"promo_type": row['promo_type'],
"value": float(row['value']),
"start_date": row['start_date'],
"end_date": row['end_date'],
"detail": row['detail']
}
)
#########################################
# WHEN STATEMENTS #
#########################################
@when(u'I visit "{page}"')
def step_impl(context, page):
# print("Targer URL", BASE_URL +'{}'.format(page))
context.resp = context.app.get('http://localhost:5001/' +'{}'.format(page))
@when(u'I visit the root url')
def step_impl(context):
context.driver.get(context.base_url)
print(context.driver.current_url)
@when(u'I press the "{button}" button')
def step_impl(context, button):
button_id = button.lower() + '-btn'
context.driver.find_element_by_id(button_id).click()
@when(u'I set the "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = 'promo_' + element_name.lower()
element = context.driver.find_element_by_id(element_id)
element.clear()
element.send_keys(text_string)
@when(u'I change field "{element_name}" to "{text_string}"')
def step_impl(context, element_name, text_string):
element_id = 'promo_' + element_name.lower()
#element = context.driver.find_element_by_id(element_id)
element = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.presence_of_element_located((By.ID, element_id))
)
element.clear()
element.send_keys(text_string)
@when(u'I retrieve "{url}" with id "{id}"')
def step_impl(context, url, id):
target_url = '{}/{}'.format(url, id)
context.resp = context.app.get('http://localhost:5001/' + target_url)
context.data = json.loads(context.resp.data.decode('utf-8'))
assert isinstance(context.data, dict)
@when(u'I update "{url}" with id "{id}"')
def step_impl(context, url, id):
target_url = '{}/{}'.format(url, id)
headers = {'content-type': 'application/json'}
data = json.dumps(context.data)
context.resp = context.app.put('http://localhost:5001/' + target_url, data=data, headers=headers)
assert context.resp.status_code == 200
@when(u'I change "{key}" to "{value}"')
def step_impl(context, key, value):
if key == 'value':
value = float(value)
context.data[key] = value
@when(u'I delete "{url}" with id "{id}"')
def step_impl(context, url, id):
target_url = '{}/{}'.format(url, id)
context.resp = context.app.delete('http://localhost:5001/' + target_url)
assert context.resp.status_code == 204
@when(u'I create a promotion')
def step_impl(context):
target_url = 'promotions'
headers = {'content-type': 'application/json'}
    data = json.dumps({})
context.resp = context.app.post('http://localhost:5001/' + target_url, data=data, headers=headers)
@when(u'I call POST with Incorrect content-type')
def step_impl(context):
target_url = 'promotions'
#headers = {'content-type': 'application/json'}
headers = {'content-type': 'not_application/json'}
    data = json.dumps({})
context.resp = context.app.post('http://localhost:5001/' + target_url, data=data, headers=headers)
@when(u'I send a PUT request to \'/promotions/delete-all\'')
def step_impl(context):
target_url = 'promotions/delete-all'
context.resp = context.app.put('http://localhost:5001/' + target_url)
#########################################
# THEN STATEMENTS #
#########################################
@then(u'I should see "{message}" in the title')
def step_impl(context, message):
""" Check the document title for a message """
expect(context.driver.title).to_contain(message)
@then(u'I should get a response code "{code}"')
def step_impl(context, code):
code = int(code)
assert context.resp.status_code == code
@then(u'There should be "{count}" promotions')
def step_impl(context, count):
count = int(count)
data = json.loads(context.resp.data.decode('utf-8'))
if isinstance(data, list):
assert len(data) == count
else:
assert isinstance(data, dict)
@then(u'I should see "{promo_name}"')
def step_impl(context, promo_name):
data = json.loads(context.resp.data.decode('utf-8'))
if isinstance(data, list):
names = [promo['name'] for promo in data]
assert promo_name in names
else:
assert data['name'] == promo_name
@then(u'I should not see "{promo_name}"')
def step_impl(context, promo_name):
data = json.loads(context.resp.data.decode('utf-8'))
if isinstance(data, list):
names = [promo['name'] for promo in data]
assert promo_name not in names
else:
assert data['name'] != promo_name
@then(u'I will see "{promo_name}" with "{key}" as "{value}"')
def step_impl(context, promo_name, key, value):
data = json.loads(context.resp.data.decode('utf-8'))
if key == 'value':
value = float(value)
if isinstance(data, list):
for promo in data:
if promo['name'] == promo_name:
assert promo[key] == value
break
else:
assert data[key] == value
@then(u'I will not see a promotion with "{key}" as "{value}"')
def step_impl(context, key, value):
data = json.loads(context.resp.data.decode('utf-8'))
if key == 'value':
value = float(value)
if isinstance(data, list):
for promo in data:
assert promo[key] != value
else:
assert data[key] != value
@then(u'I reset the server db for further tests')
def step_impl(context):
server.data_reset()
@then(u'I should see the message "{message}"')
def step_impl(context, message):
#element = context.driver.find_element_by_id('flash_message')
#expect(element.text).to_contain(message)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'flash_message'),
message
)
)
expect(found).to_be(True)
@then(u'I should see "{name}" in the results')
def step_impl(context, name):
#element = context.driver.find_element_by_id('search_results')
#expect(element.text).to_contain(name)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element(
(By.ID, 'search_results'),
name
)
)
expect(found).to_be(True)
@then(u'I should not see "{name}" in the results')
def step_impl(context, name):
element = context.driver.find_element_by_id('search_results')
error_msg = "I should not see '%s' in '%s'" % (name, element.text)
expect(element.text).to_not_contain(name)
@then(u'I should see "{text_string}" in the "{element_name}" field')
def step_impl(context, text_string, element_name):
element_id = 'promo_' + element_name.lower()
#element = context.driver.find_element_by_id(element_id)
found = WebDriverWait(context.driver, WAIT_SECONDS).until(
expected_conditions.text_to_be_present_in_element_value(
(By.ID, element_id),
text_string
)
)
#expect(element.get_attribute('value')).to_equal(text_string)
expect(found).to_be(True) | apache-2.0 | -5,777,792,518,639,897,000 | 33.829694 | 102 | 0.614169 | false |
wuyuewen/libcloud | libcloud/test/common/test_gandi.py | 66 | 1293 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.utils.py3 import xmlrpclib
from libcloud.test import MockHttp
class BaseGandiMockHttp(MockHttp):
def _get_method_name(self, type, use_param, qs, path):
return "_xmlrpc"
def _xmlrpc(self, method, url, body, headers):
params, methodName = xmlrpclib.loads(body)
meth_name = '_xmlrpc__' + methodName.replace('.', '_')
if self.type:
meth_name = '%s_%s' % (meth_name, self.type)
return getattr(self, meth_name)(method, url, body, headers)
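# --- Illustrative sketch (not part of the test helpers) ---
# _xmlrpc() above decodes the XML-RPC request body and turns the dotted method
# name into the name of a fixture method. The standalone lines below show that
# mapping using only the stdlib marshaller re-exported by libcloud.utils.py3.
if __name__ == '__main__':
    body = xmlrpclib.dumps((), 'hosting.vm.list')
    params, method_name = xmlrpclib.loads(body)
    print('_xmlrpc__' + method_name.replace('.', '_'))
    # -> _xmlrpc__hosting_vm_list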
| apache-2.0 | 2,610,235,585,873,094,000 | 42.1 | 74 | 0.720031 | false |
erikge/watch_gyp | test/generator-output/gyptest-rules.py | 19 | 1839 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('rules'), False)
test.run_gyp('rules.gyp',
'--generator-output=' + test.workpath('gypfiles'),
'-G', 'xcode_ninja_target_pattern=^pull_in_all_actions$',
chdir='rules')
test.writable(test.workpath('rules'), True)
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""
if test.format == 'xcode':
chdir = 'relocate/rules/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
"Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
"Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
"Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
"Hello from file4.in1\n")
test.pass_test()
| bsd-3-clause | -7,982,053,386,074,177,000 | 29.65 | 72 | 0.700924 | false |
googleapis/python-compute | google/cloud/compute_v1/services/region_notification_endpoints/transports/__init__.py | 1 | 1104 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import RegionNotificationEndpointsTransport
from .rest import RegionNotificationEndpointsRestTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[RegionNotificationEndpointsTransport]]
_transport_registry["rest"] = RegionNotificationEndpointsRestTransport
__all__ = (
"RegionNotificationEndpointsTransport",
"RegionNotificationEndpointsRestTransport",
)
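# --- Illustrative sketch (not part of the generated package) ---
# The registry above maps a transport name to its class; callers select a
# transport implementation by that string key.
if __name__ == "__main__":
    transport_cls = _transport_registry["rest"]
    print(transport_cls.__name__)  # RegionNotificationEndpointsRestTransport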
| apache-2.0 | 3,745,248,837,120,232,400 | 33.5 | 74 | 0.774457 | false |
Mappy/PyLR | pylr/parser.py | 1 | 15704 | # -*- coding: utf-8 -*-
''' Location parser
.. moduleauthor:: David Marteau <[email protected]>
'''
from collections import namedtuple
from bitstring import BitStream
from .utils import lazyproperty
from .constants import (LATEST_BINARY_VERSION,
BINARY_VERSION_2,
MIN_BYTES_LINE_LOCATION,
MIN_BYTES_POINT_LOCATION,
MIN_BYTES_CLOSED_LINE_LOCATION,
MIN_BYTES_POLYGON,
RELATIVE_COORD_SIZE,
IS_POINT,
HAS_ATTRIBUTES,
GEOCOORD_SIZE,
POINT_ALONG_LINE_SIZE,
POINT_WITH_ACCESS_SIZE,
POINT_OFFSET_SIZE,
AREA_CODE_CIRCLE,
AREA_CODE_RECTANGLE,
AREA_CODE_POLYGON,
RECTANGLE_SIZE,
LARGE_RECTANGLE_SIZE,
GRID_SIZE,
LARGE_GRID_SIZE,
LRP_SIZE,
CIRCLE_BASE_SIZE,
LocationType)
class BinaryParseError(Exception):
pass
class BinaryVersionError(BinaryParseError):
pass
class InvalidDataSizeError(BinaryParseError):
pass
# The Constant RFU (Reserved for Future Use)
RFU_BITS = 'uint:1'
# number of bits used for attributes flag
ATTR_FLAG_BITS = 'uint:1'
# number of bits used for point flag
POINT_FLAG_BITS = 'uint:1'
# number of bits used for version
VERSION_BITS = 'uint:3'
AREA_FLAG_BIT0 = 'uint:1'
AREA_FLAG_BIT1 = 'uint:1'
# Field order matches the unpack in _RawBinaryData.header: RFU, ArF1, PF, ArF0, AF, VER
HEADER_BITS = (RFU_BITS,
               AREA_FLAG_BIT1,
               POINT_FLAG_BITS,
               AREA_FLAG_BIT0,
               ATTR_FLAG_BITS,
               VERSION_BITS)
_BinaryHeader = namedtuple('_BinaryHeader', ('arf', 'af', 'pf', 'ver'))
class _RawBinaryData(object):
""" Hold a location reference description as a bit stream."""
MIN_VERSION = BINARY_VERSION_2
MAX_VERSION = LATEST_BINARY_VERSION
def __init__(self, data, base64=False):
""" Constructor.
:param string data: Binaray data
:param bool base64: True if data is coded in base64
"""
if base64:
data = data.decode("base64")
#: raw data size
self._sz = len(data)
#: bit stream used to read data
self._bs = BitStream(bytes=data)
def getbits(self, *bits):
""" Read the given numbers of bits.
:param tuple bits: Tuple of number of bits to read
:returns: Tuple of bit fields
:rtype: tuple
"""
return tuple(self._bs.read(v) for v in bits)
def get_position(self):
""" Returns position in the bit stream.
:returns: Position in the bit stream
:rtype: int
"""
return self._bs.pos
@property
def num_bytes(self):
""" Size of the decoded data.
:returns: Size of the decoded data.
:rtype: int
"""
return self._sz
@property
def version(self):
""" Return binary version of the data
:returns: Binary version of the data.
:rtype: int
"""
return self.header.ver
@lazyproperty
def header(self):
""" Parse header (once) location type
:returns: Header data
:rtype: _BinaryHeader
"""
# Validate data size
if self._sz < min(MIN_BYTES_LINE_LOCATION,
MIN_BYTES_POINT_LOCATION,
MIN_BYTES_CLOSED_LINE_LOCATION):
raise InvalidDataSizeError("not enough bytes in data")
_, arf1, pf, arf0, af, ver = self.getbits(*HEADER_BITS)
arf = 2 * arf1 + arf0
return _BinaryHeader(arf, af, pf, ver)
@lazyproperty
def location_type(self):
""" Parse location type (once)
:returns: Location type
:rtype: LocationType
"""
header = self.header
# Check version
if not self.MIN_VERSION <= header.ver <= self.MAX_VERSION:
raise BinaryVersionError("Invalid binary version {}".format(header.ver))
is_point = (header.pf == IS_POINT)
has_attributes = (header.af == HAS_ATTRIBUTES)
area_code = header.arf
is_area = ((area_code == 0 and not is_point and not has_attributes) or area_code > 0)
total_bytes = self._sz
loc_type = LocationType.UNKNOWN
if not is_point and not is_area and has_attributes:
loc_type = LocationType.LINE_LOCATION
elif is_point and not is_area:
if not has_attributes:
if total_bytes == GEOCOORD_SIZE:
loc_type = LocationType.GEO_COORDINATES
else:
raise InvalidDataSizeError("Invalid byte size")
else:
if total_bytes == POINT_ALONG_LINE_SIZE or total_bytes == (POINT_ALONG_LINE_SIZE + POINT_OFFSET_SIZE):
loc_type = LocationType.POINT_ALONG_LINE
elif total_bytes == POINT_WITH_ACCESS_SIZE or total_bytes == (POINT_WITH_ACCESS_SIZE + POINT_OFFSET_SIZE):
loc_type = LocationType.POI_WITH_ACCESS_POINT
else:
raise InvalidDataSizeError("Invalid byte size")
elif is_area and not is_point and has_attributes:
if total_bytes >= MIN_BYTES_CLOSED_LINE_LOCATION:
loc_type = LocationType.CLOSED_LINE
else:
raise InvalidDataSizeError("Invalid byte size")
else:
if area_code == AREA_CODE_CIRCLE:
loc_type = LocationType.CIRCLE
elif area_code == AREA_CODE_RECTANGLE:
# includes case AREA_CODE_GRID
if total_bytes == RECTANGLE_SIZE or total_bytes == LARGE_RECTANGLE_SIZE:
loc_type = LocationType.RECTANGLE
elif total_bytes == GRID_SIZE or total_bytes == LARGE_GRID_SIZE:
loc_type = LocationType.GRID
else:
raise InvalidDataSizeError("Invalid byte size")
elif area_code == AREA_CODE_POLYGON:
if not has_attributes and total_bytes >= MIN_BYTES_POLYGON:
loc_type = LocationType.POLYGON
else:
raise InvalidDataSizeError("Invalid byte size")
else:
raise BinaryParseError('Invalid header')
return loc_type
def init_binary_parsing(data, base64=False):
""" Create an instance of _RawBinaryData
The returned object can be passed to 'parse_binary'
:param string data: string describing the location
:param bool base64: True if encoded in base 64
:returns: Parsable data structure
:rtype: _RawBinaryData
"""
return _RawBinaryData(data, base64)
def parse_binary(data, base64=False):
""" Parse binary data.
Input is original data or an object returned by init_binary_parsing(...)
:param data: string (encoded or not) describing the location
:param bool base64: True if encoded in base 64
:returns: Object describing the parsed location, or an error object
"""
if not isinstance(data, _RawBinaryData):
data = _RawBinaryData(data, base64)
# Get header
loc_type = data.location_type
if loc_type == LocationType.LINE_LOCATION:
return parse_line(data)
elif loc_type == LocationType.POINT_ALONG_LINE:
return parse_point_along_line(data)
elif loc_type == LocationType.GEO_COORDINATES:
return parse_geo_coordinates(data)
elif loc_type == LocationType.POI_WITH_ACCESS_POINT:
return parse_poi_with_access_point(data)
elif loc_type == LocationType.RECTANGLE:
return parse_rectangle(data)
elif loc_type == LocationType.CLOSED_LINE:
return parse_closed_line(data)
elif loc_type == LocationType.CIRCLE:
return parse_circle(data)
elif loc_type == LocationType.GRID:
return parse_grid(data)
elif loc_type == LocationType.POLYGON:
return parse_polygon(data)
else:
return BinaryParseError("Invalid location type")
# ----------------
# Location parsers
# ----------------
HEAD_FIELDS = ('version', 'type')
from .binary import (_parse_first_lrp,
_parse_intermediate_lrp,
_parse_last_line_lrp,
_parse_last_closed_line_attrs,
_parse_offset,
_parse_relative_coordinates,
_parse_absolute_coordinates,
_parse_radius,
_parse_grid_dimensions)
# LINE_LOCATION
LineLocation = namedtuple('LineLocation', HEAD_FIELDS+('flrp', 'llrp', 'points', 'poffs', 'noffs'))
""" Line Location type
"""
def parse_line(rb):
""" Parse line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Line location
:rtype: LineLocation
"""
assert rb.location_type == LocationType.LINE_LOCATION
# number of intermediates points
    num_intermediates = (rb.num_bytes - MIN_BYTES_LINE_LOCATION) // LRP_SIZE
flrp = _parse_first_lrp(rb)
points = []
rel = flrp
for _ in range(num_intermediates):
ilrp = _parse_intermediate_lrp(rb, rel)
points.append(ilrp)
rel = ilrp
llrp, pofff, nofff = _parse_last_line_lrp(rb, rel)
poffs = _parse_offset(rb) if pofff else 0
noffs = _parse_offset(rb) if nofff else 0
return LineLocation(rb.version, rb.location_type, flrp, llrp, points, poffs, noffs)
# POINT_ALONG_LINE
PointAlongLineLocation = namedtuple('PointAlongLineLocation', HEAD_FIELDS+('flrp', 'llrp', 'poffs'))
""" Point along location type
"""
def parse_point_along_line(rb):
""" Parse point along line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Point along line location
:rtype: PointAlongLineLocation
"""
assert rb.location_type == LocationType.POINT_ALONG_LINE
flrp = _parse_first_lrp(rb)
llrp, pofff, _ = _parse_last_line_lrp(rb, flrp)
poffs = _parse_offset(rb) if pofff else 0
return PointAlongLineLocation(rb.version, rb.location_type, flrp, llrp, poffs)
# GEO_COORDINATES
GeoCoordinateLocation = namedtuple('GeoCoordinateLocation', HEAD_FIELDS+('coords',))
""" Coordinate location type
"""
def parse_geo_coordinates(rb):
""" Parse geo coordinates location
:param _RawBinaryData rb: Binary data describing the location
:returns: Geographic coordinates location
:rtype: GeoCoordinateLocation
"""
assert rb.location_type == LocationType.GEO_COORDINATES
coords = _parse_absolute_coordinates(rb)
return GeoCoordinateLocation(rb.version, rb.location_type, coords)
# POI_WITH_ACCESS_POINT
PoiWithAccessPointLocation = namedtuple('PoiWithAccessPointLocation', HEAD_FIELDS+(
'flrp', 'llrp', 'poffs', 'coords'))
""" Poi with access location type
"""
def parse_poi_with_access_point(rb):
""" Parse POI with access point
:param _RawBinaryData rb: Binary data describing the location
:returns: POI with access point location
:rtype: PoiWithAccessPointLocation
"""
assert rb.location_type == LocationType.POI_WITH_ACCESS_POINT
flrp = _parse_first_lrp(rb)
llrp, pofff, _ = _parse_last_line_lrp(rb, flrp)
poffs = _parse_offset(rb) if pofff else 0
coords = _parse_relative_coordinates(rb, flrp.coords)
return PoiWithAccessPointLocation(rb.version, rb.location_type, flrp, llrp,
poffs, coords)
# CIRCLE
CircleLocation = namedtuple('CircleLocation', HEAD_FIELDS+('coords', 'radius'))
""" Circle Location type
"""
def parse_circle(rb):
""" Parse circle location
:param _RawBinaryData rb: Binary data describing the location
:returns: Circle location
:rtype: CircleLocation
"""
assert rb.location_type == LocationType.CIRCLE
radius_size = rb.num_bytes - CIRCLE_BASE_SIZE
coords = _parse_absolute_coordinates(rb)
radius = _parse_radius(rb, radius_size)
return CircleLocation(rb.version, rb.location_type, coords, radius)
# RECTANGLE
BBox = namedtuple('BBox', ('minx', 'miny', 'maxx', 'maxy'))
RectangleLocation = namedtuple('RectangleLocation', HEAD_FIELDS+('bbox',))
""" Rectangle Location type
"""
def parse_rectangle(rb):
""" Parse rectangle location
:param _RawBinaryData rb: Binary data describing the location
:returns: Rectangle location
:rtype: RectangleLocation
"""
assert rb.location_type == LocationType.RECTANGLE
bl = _parse_absolute_coordinates(rb)
if rb.num_bytes == LARGE_RECTANGLE_SIZE:
tr = _parse_absolute_coordinates(rb)
else:
tr = _parse_relative_coordinates(rb, bl)
bbox = BBox(bl.lon, bl.lat, tr.lon, tr.lat)
return RectangleLocation(rb.version, rb.location_type, bbox)
# GRID
GridLocation = namedtuple('GridLocation', HEAD_FIELDS+('bbox', 'cols', 'rows'))
""" Grid Location type
"""
def parse_grid(rb):
""" Parse grid location
:param _RawBinaryData rb: Binary data describing the location
:returns: Grid location
:rtype: GridLocation
"""
assert rb.location_type == LocationType.GRID
bl = _parse_absolute_coordinates(rb)
if rb.num_bytes == LARGE_GRID_SIZE:
tr = _parse_absolute_coordinates(rb)
else:
tr = _parse_relative_coordinates(rb, bl)
bbox = BBox(bl.lon, bl.lat, tr.lon, tr.lat)
cols, rows = _parse_grid_dimensions(rb)
return GridLocation(rb.version, rb.location_type, bbox, cols, rows)
# CLOSED LINE
ClosedLineLocation = namedtuple('ClosedLineLocation', HEAD_FIELDS+('flrp', 'points', 'frc', 'fow', 'bear'))
def parse_closed_line(rb):
""" Parse closed line location
:param _RawBinaryData rb: Binary data describing the location
:returns: Closed line location
:rtype: ClosedLineLocation
"""
assert rb.location_type == LocationType.CLOSED_LINE
# number of intermediates points
    num_intermediates = (rb.num_bytes - MIN_BYTES_CLOSED_LINE_LOCATION) // LRP_SIZE
flrp = _parse_first_lrp(rb)
points = []
rel = flrp
for _ in range(num_intermediates):
ilrp = _parse_intermediate_lrp(rb, rel)
points.append(ilrp)
rel = ilrp
frc, fow, bear = _parse_last_closed_line_attrs(rb)
return ClosedLineLocation(rb.version, rb.location_type, flrp, points, frc, fow, bear)
# CLOSED LINE
PolygonLocation = namedtuple('PolygonLocation', HEAD_FIELDS+('points',))
def parse_polygon(rb):
""" Parse polygon location
:param _RawBinaryData rb: Binary data describing the location
:returns: Polygon location
:rtype: PolygonLocation
"""
assert rb.location_type == LocationType.POLYGON
# number of points
    # MIN_BYTES_POLYGON includes the first point and 2 relative points
    num_intermediates = 2 + (rb.num_bytes - MIN_BYTES_POLYGON) // RELATIVE_COORD_SIZE
points = []
rel = _parse_absolute_coordinates(rb)
points.append(rel)
for _ in range(num_intermediates):
ilrp = _parse_relative_coordinates(rb, rel)
points.append(ilrp)
rel = ilrp
return PolygonLocation(rb.version, rb.location_type, points)
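# --- Illustrative sketch (not part of the module) ---
# Typical use of the top-level dispatcher: hand parse_binary() a base64 OpenLR
# string and inspect the namedtuple that comes back. The sample string below is
# assumed to be a valid OpenLR line-location reference (substitute any string
# produced by an OpenLR encoder); base64 handling here follows the module's
# Python 2 style.
if __name__ == '__main__':
    loc = parse_binary("CwRbWyNG9RpsCQCb/jsbtAT/6/+jK1lE", base64=True)
    print(loc.type)
    if isinstance(loc, LineLocation):
        print(loc.flrp, loc.llrp, loc.poffs, loc.noffs)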
| apache-2.0 | -743,408,833,915,572,900 | 30.035573 | 122 | 0.59972 | false |
ArchiveLabs/dweb_gateway | python/Multihash.py | 1 | 5374 | """
A set of classes to hold different kinds of hashes etc and convert between them,
Much of this was adapted from https://github.com/tehmaze/python-multihash,
which seems to have evolved from the pip3 multihash, which is seriously broken.
"""
import hashlib
import struct
import sha3
import pyblake2
import base58
import binascii
import logging
from sys import version as python_version
if python_version.startswith('3'):
from urllib.parse import urlparse
else:
from urlparse import urlparse # See https://docs.python.org/2/library/urlparse.html
from .Errors import MultihashError
class Multihash(object):
"""
    Superclass for all kinds of hashes; a convenience for passing hashes around
    between places that want binary, multihash, or hex representations.
core storage is as a multihash_binary i.e. [ code, length, digest...]
Each instance:
code = SHA1, SHA256 etc (uses integer conventions from multihash
"""
# Constants
# 0x01..0x0F are app specific (unused)
SHA1 = 0x11
SHA2_256 = 0x12
SHA2_512 = 0x13
SHA3 = 0x14
BLAKE2B = 0x40
BLAKE2S = 0x41
FUNCS = {
SHA1: hashlib.sha1,
SHA2_256: hashlib.sha256,
# Alternative use nacl.hash.sha256(data, encoder=nacl.encoding.RawEncoder) which has different footprint
SHA2_512: hashlib.sha512,
SHA3: lambda: hashlib.new('sha3_512'),
BLAKE2B: lambda: pyblake2.blake2b(),
BLAKE2S: lambda: pyblake2.blake2s(),
}
LENGTHS = {
SHA1: 20,
SHA2_256: 32,
SHA2_512: 64,
SHA3: 64,
BLAKE2B: 64,
BLAKE2S: 32,
}
def assertions(self, code=None):
if code and code != self.code:
raise MultihashError(message="Expecting code {}, got {}".format(code, self.code))
if self.code not in self.FUNCS:
raise MultihashError(message="Unsupported Hash type {}".format(self.code))
if (self.digestlength != len(self.digest)) or (self.digestlength != self.LENGTHS[self.code]):
raise MultihashError(message="Invalid lengths: expect {}, byte {}, len {}"
.format(self.LENGTHS[self.code], self.digestlength, len(self.digest)))
def __init__(self, multihash58=None, sha1hex=None, data=None, code=None, url=None):
"""
Accept variety of parameters,
:param multihash_58:
"""
digest = None
if url: # Assume its of the form somescheme:/somescheme/Q...
logging.debug("url={} {}".format(url.__class__.__name__,url))
if isinstance(url, str) and "/" in url: # https://.../Q...
url = urlparse(url)
if not isinstance(url, str):
multihash58 = url.path.split('/')[-1]
else:
multihash58 = url
if multihash58[0] not in ('5','Q'): # Simplistic check that it looks ok-ish
raise MultihashError(message="Invalid hash portion of URL {}".format(multihash58))
if multihash58:
self._multihash_binary = base58.b58decode(multihash58)
if sha1hex:
if python_version.startswith('2'):
digest = sha1hex.decode('hex') # Python2
else:
digest = bytes.fromhex(sha1hex) # Python3
code = self.SHA1
if data and code:
digest = self._hash(code, data)
if digest and code:
self._multihash_binary = bytearray([code, len(digest)])
self._multihash_binary.extend(digest)
self.assertions() # Check consistency
def _hash(self, code, data):
if not code in self.FUNCS:
raise MultihashError(message="Cant encode hash code={}".format(code))
        hashfn = self.FUNCS.get(code)()  # FUNCS maps codes to hash constructors; call one to get a fresh hash object
if isinstance(data, bytes):
hashfn.update(data)
elif isinstance(data, str):
# In Python 3 this is ok, would be better if we were sure it was utf8
# raise MultihashError(message="Should be passing bytes, not strings as could encode multiple ways") # TODO can remove this if really need to handle UTF8 strings, but better to push conversion upstream
hashfn.update(data.encode('utf-8'))
return hashfn.digest()
def check(self, data):
assert self.digest == self._hash(self.code, data), "Hash doesnt match expected"
@property
def code(self):
return self._multihash_binary[0]
@property
def digestlength(self):
return self._multihash_binary[1]
@property
def digest(self):
"""
:return: bytes, the digest part of any multihash
"""
return self._multihash_binary[2:]
@property
def sha1hex(self):
"""
:return: The hex of the sha1 (as used in DOI sqlite tables)
"""
self.assertions(self.SHA1)
return binascii.hexlify(self.digest).decode('utf-8') # The decode is turn bytes b'a1b2' to str 'a1b2'
@property
def multihash58(self):
        foo = base58.b58encode(bytes(self._multihash_binary))  # b58encode is documented to return bytes, but some versions return str; we always want str
if isinstance(foo,bytes):
return foo.decode('ascii')
else:
return foo | agpl-3.0 | -774,083,055,685,155,600 | 35.564626 | 214 | 0.610346 | false |
Fat-Zer/FreeCAD_sf_master | src/Mod/Path/PathScripts/PathArray.py | 4 | 11223 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2015 Yorik van Havre <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import Path
import PathScripts
from PySide import QtCore
import math
__doc__ = """Path Array object and FreeCAD command"""
# Qt translation handling
def translate(context, text, disambig=None):
return QtCore.QCoreApplication.translate(context, text, disambig)
class ObjectArray:
def __init__(self, obj):
obj.addProperty("App::PropertyLink", "Base",
"Path", "The path to array")
obj.addProperty("App::PropertyEnumeration", "Type",
"Path", QtCore.QT_TRANSLATE_NOOP("App::Property", "Pattern method"))
obj.addProperty("App::PropertyVectorDistance", "Offset",
"Path", "The spacing between the array copies in Linear pattern")
obj.addProperty("App::PropertyInteger", "CopiesX",
"Path", "The number of copies in X direction in Linear pattern")
obj.addProperty("App::PropertyInteger", "CopiesY",
"Path", "The number of copies in Y direction in Linear pattern")
obj.addProperty("App::PropertyAngle", "Angle",
"Path", "Total angle in Polar pattern")
obj.addProperty("App::PropertyInteger", "Copies",
"Path", "The number of copies in Linear 1D and Polar pattern")
obj.addProperty("App::PropertyVector", "Centre",
"Path", "The centre of rotation in Polar pattern")
obj.addProperty("App::PropertyLink", "ToolController",
"Path", QtCore.QT_TRANSLATE_NOOP("App::Property", "The tool controller that will be used to calculate the path"))
obj.Type = ['Linear1D', 'Linear2D', 'Polar']
self.setEditorProperties(obj)
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def setEditorProperties(self, obj):
if obj.Type == 'Linear2D':
obj.setEditorMode('Angle', 2)
obj.setEditorMode('Copies', 2)
obj.setEditorMode('Centre', 2)
obj.setEditorMode('CopiesX', 0)
obj.setEditorMode('CopiesY', 0)
obj.setEditorMode('Offset', 0)
elif obj.Type == 'Polar':
obj.setEditorMode('Angle', 0)
obj.setEditorMode('Copies', 0)
obj.setEditorMode('Centre', 0)
obj.setEditorMode('CopiesX', 2)
obj.setEditorMode('CopiesY', 2)
obj.setEditorMode('Offset', 2)
elif obj.Type == 'Linear1D':
obj.setEditorMode('Angle', 2)
obj.setEditorMode('Copies', 0)
obj.setEditorMode('Centre', 2)
obj.setEditorMode('CopiesX', 2)
obj.setEditorMode('CopiesY', 2)
obj.setEditorMode('Offset', 0)
def onChanged(self, obj, prop):
if prop == "Type":
self.setEditorProperties(obj)
def rotatePath(self, path, angle, centre):
'''
Rotates Path around given centre vector
Only X and Y is considered
'''
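        # For a point (x, y) rotated counter-clockwise by angle a (radians)
        # about centre (cx, cy):
        #   x' = cx + (x - cx) * cos(a) - (y - cy) * sin(a)
        #   y' = cy + (y - cy) * cos(a) + (x - cx) * sin(a)
        # The arc centre offsets (I, J) are rotated with the same matrix below.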
CmdMoveRapid = ['G0', 'G00']
CmdMoveStraight = ['G1', 'G01']
CmdMoveCW = ['G2', 'G02']
CmdMoveCCW = ['G3', 'G03']
CmdDrill = ['G81', 'G82', 'G83']
CmdMoveArc = CmdMoveCW + CmdMoveCCW
CmdMove = CmdMoveStraight + CmdMoveArc
commands = []
        ang = math.radians(angle)
currX = 0
currY = 0
for cmd in path.Commands:
if (cmd.Name in CmdMoveRapid) or (cmd.Name in CmdMove) or (cmd.Name in CmdDrill):
params = cmd.Parameters
x = params.get("X")
if x is None:
x = currX
currX = x
y = params.get("Y")
if y is None:
y = currY
currY = y
# "move" the centre to origin
x = x - centre.x
y = y - centre.y
# rotation around origin:
nx = x * math.cos(ang) - y * math.sin(ang)
ny = y * math.cos(ang) + x * math.sin(ang)
# "move" the centre back and update
params.update({'X': nx + centre.x, 'Y': ny + centre.y})
# Arcs need to have the I and J params rotated as well
if cmd.Name in CmdMoveArc:
i = params.get("I")
if i is None:
i = 0
j = params.get("J")
if j is None:
j = 0
ni = i * math.cos(ang) - j * math.sin(ang)
nj = j * math.cos(ang) + i * math.sin(ang)
params.update({'I': ni, 'J': nj})
cmd.Parameters = params
commands.append(cmd)
newPath = Path.Path(commands)
return newPath
def execute(self, obj):
if obj.Base:
if not obj.Base.isDerivedFrom("Path::Feature"):
return
if not obj.Base.Path:
return
if not obj.Base.ToolController:
return
obj.ToolController = obj.Base.ToolController
# build copies
basepath = obj.Base.Path
output = ""
if obj.Type == 'Linear1D':
for i in range(obj.Copies):
pl = FreeCAD.Placement()
pos = FreeCAD.Vector(obj.Offset.x * (i + 1), obj.Offset.y * (i + 1), 0)
pl.move(pos)
np = Path.Path([cm.transform(pl)
for cm in basepath.Commands])
output += np.toGCode()
elif obj.Type == 'Linear2D':
for i in range(obj.CopiesX + 1):
for j in range(obj.CopiesY + 1):
pl = FreeCAD.Placement()
                    # Do not process index (0, 0); it is already covered by the base path.
if not (i == 0 and j == 0):
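                        # Alternate the Y direction on every other column so
                        # the copies are visited in a serpentine (zig-zag)
                        # order, keeping moves between neighbouring copies
                        # short.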
if (i % 2) == 0:
pos = FreeCAD.Vector(obj.Offset.x * i, obj.Offset.y * j, 0)
else:
pos = FreeCAD.Vector(obj.Offset.x * i, obj.Offset.y * (obj.CopiesY - j), 0)
pl.move(pos)
np = Path.Path([cm.transform(pl)
for cm in basepath.Commands])
output += np.toGCode()
else:
for i in range(obj.Copies):
ang = 360
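                # Guard against division by zero; if Copies were 0 the loop
                # body would not run at all, so 360 is only a safety default.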
if obj.Copies > 0:
ang = obj.Angle / obj.Copies * (1 + i)
np = self.rotatePath(basepath, ang, obj.Centre)
output += np.toGCode()
# print output
path = Path.Path(output)
obj.Path = path
class ViewProviderArray:
def __init__(self, vobj):
self.Object = vobj.Object
vobj.Proxy = self
def attach(self, vobj):
self.Object = vobj.Object
return
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def claimChildren(self):
if hasattr(self, "Object"):
if hasattr(self.Object, "Base"):
if self.Object.Base:
return self.Object.Base
return []
class CommandPathArray:
def GetResources(self):
return {'Pixmap': 'Path_Array',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Array", "Array"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Array", "Creates an array from a selected path")}
def IsActive(self):
        if not FreeCADGui.Selection.getSelection():
return False
try:
obj = FreeCADGui.Selection.getSelectionEx()[0].Object
return isinstance(obj.Proxy, PathScripts.PathOp.ObjectOp)
except(IndexError, AttributeError):
return False
def Activated(self):
# check that the selection contains exactly what we want
selection = FreeCADGui.Selection.getSelection()
if len(selection) != 1:
FreeCAD.Console.PrintError(
translate("Path_Array", "Please select exactly one path object")+"\n")
return
        if not selection[0].isDerivedFrom("Path::Feature"):
FreeCAD.Console.PrintError(
translate("Path_Array", "Please select exactly one path object")+"\n")
return
# if everything is ok, execute and register the transaction in the
# undo/redo stack
FreeCAD.ActiveDocument.openTransaction("Create Array")
FreeCADGui.addModule("PathScripts.PathArray")
FreeCADGui.addModule("PathScripts.PathUtils")
FreeCADGui.doCommand(
'obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","Array")')
FreeCADGui.doCommand('PathScripts.PathArray.ObjectArray(obj)')
FreeCADGui.doCommand(
'obj.Base = (FreeCAD.ActiveDocument.' + selection[0].Name + ')')
# FreeCADGui.doCommand('PathScripts.PathArray.ViewProviderArray(obj.ViewObject)')
FreeCADGui.doCommand('obj.ViewObject.Proxy = 0')
FreeCADGui.doCommand('PathScripts.PathUtils.addToJob(obj)')
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Array', CommandPathArray())
| lgpl-2.1 | -3,158,016,290,778,695,700 | 38.657244 | 137 | 0.505123 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/plugins/atresplayer.py | 4 | 2806 | from __future__ import print_function
import logging
import re
from functools import partial
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream, DASHStream
from streamlink.utils import parse_json, update_scheme, search_dict
log = logging.getLogger(__name__)
class AtresPlayer(Plugin):
url_re = re.compile(r"https?://(?:www.)?atresplayer.com/")
state_re = re.compile(r"""window.__PRELOADED_STATE__\s*=\s*({.*?});""", re.DOTALL)
channel_id_schema = validate.Schema(
validate.transform(state_re.search),
validate.any(
None,
validate.all(
validate.get(1),
validate.transform(parse_json),
validate.transform(partial(search_dict, key="href")),
)
)
)
player_api_schema = validate.Schema(
validate.any(
None,
validate.all(
validate.transform(parse_json),
validate.transform(partial(search_dict, key="urlVideo")),
)
)
)
stream_schema = validate.Schema(
validate.transform(parse_json),
{"sources": [
validate.all({
"src": validate.url(),
validate.optional("type"): validate.text
})
]}, validate.get("sources"))
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def __init__(self, url):
# must be HTTPS
super(AtresPlayer, self).__init__(update_scheme("https://", url))
def _get_streams(self):
api_urls = self.session.http.get(self.url, schema=self.channel_id_schema)
_api_url = list(api_urls)[0]
log.debug("API URL: {0}".format(_api_url))
player_api_url = self.session.http.get(_api_url, schema=self.player_api_schema)
for api_url in player_api_url:
log.debug("Player API URL: {0}".format(api_url))
for source in self.session.http.get(api_url, schema=self.stream_schema):
log.debug("Stream source: {0} ({1})".format(source['src'], source.get("type", "n/a")))
if "type" not in source or source["type"] == "application/vnd.apple.mpegurl":
streams = HLSStream.parse_variant_playlist(self.session, source["src"])
if not streams:
yield "live", HLSStream(self.session, source["src"])
else:
for s in streams.items():
yield s
elif source["type"] == "application/dash+xml":
for s in DASHStream.parse_manifest(self.session, source["src"]).items():
yield s
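# Streamlink discovers this plugin through the module-level ``__plugin__``
# binding below. A quick sanity check (a sketch; the URL is illustrative):
#
#   AtresPlayer.can_handle_url("https://www.atresplayer.com/directos/antena3/")
#   # -> True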
__plugin__ = AtresPlayer
| gpl-2.0 | 1,943,419,945,016,499,700 | 35.441558 | 102 | 0.560941 | false |
CERNDocumentServer/invenio | modules/webjournal/lib/webjournal_unit_tests.py | 3 | 2784 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebJournal."""
__revision__ = \
"$Id$"
# pylint invenio/modules/webjournal/lib/webjournal_tests.py
from invenio.testutils import InvenioTestCase
from invenio.webjournal_utils import compare_issues
from invenio.webjournal import issue_is_later_than
#from invenio import webjournal_utils
from invenio.testutils import make_test_suite, run_test_suite
#from invenio.config import CFG_SITE_URL
class TestCompareIssues(InvenioTestCase):
"""Tests for comparing issues."""
def test_compare_issues(self):
"""webjournal - tests comparing issues"""
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), 0)
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
def test_issue1_is_later_than(self):
"""webjournal - tests comparing issue1 is later than issue2 """
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
TEST_SUITE = make_test_suite(TestCompareIssues)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 | -1,126,148,306,465,278,200 | 31 | 74 | 0.665948 | false |
CuriousLearner/standup | src/updates/views.py | 1 | 1310 | from django.shortcuts import render, get_object_or_404
from django.views.generic import DetailView
from authen.models import User
from fraternity.models import Team, Project
from .models import Post, Hashtag
class GetUserPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
user = get_object_or_404(User, username=self.kwargs['username'])
return self.model.objects.filter(posted_by=user)
class GetTeamPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
team = get_object_or_404(Team, slug=self.kwargs['team'])
return self.model.objects.filter(team=team)
class GetProjectPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
project = get_object_or_404(Project, slug=self.kwargs['project'])
return self.model.objects.filter(project=project)
class GetHashtagPosts(DetailView):
model = Post
template_name = 'posts.html'
context_object_name = 'posts'
def get_object(self):
hashtag = get_object_or_404(Hashtag, content=self.kwargs['hashtag'])
return self.model.objects.filter(hashtags=hashtag)
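# A matching URLconf sketch (hypothetical patterns; the named groups must
# line up with the self.kwargs lookups above):
#
#   urlpatterns = [
#       url(r'^@(?P<username>[\w.@+-]+)/$', GetUserPosts.as_view()),
#       url(r'^team/(?P<team>[-\w]+)/$', GetTeamPosts.as_view()),
#       url(r'^project/(?P<project>[-\w]+)/$', GetProjectPosts.as_view()),
#       url(r'^tag/(?P<hashtag>\w+)/$', GetHashtagPosts.as_view()),
#   ]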
| gpl-3.0 | -1,561,476,055,481,549,600 | 27.478261 | 76 | 0.693893 | false |
zasdfgbnm/tensorflow | tensorflow/contrib/text/python/ops/skip_gram_ops.py | 76 | 21608 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops from https://arxiv.org/abs/1301.3781."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from tensorflow.contrib import lookup
from tensorflow.contrib.text.python.ops import gen_skip_gram_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import input as input_ops
_skip_gram_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_skip_gram_ops.so"))
ops.NotDifferentiable("SkipGramGenerateCandidates")
def skip_gram_sample(input_tensor,
min_skips=1,
max_skips=5,
start=0,
limit=-1,
emit_self_as_target=False,
vocab_freq_table=None,
vocab_min_count=None,
vocab_subsampling=None,
corpus_size=None,
batch_size=None,
batch_capacity=None,
seed=None,
name=None):
"""Generates skip-gram token and label paired Tensors from the input tensor.
Generates skip-gram `("token", "label")` pairs using each element in the
rank-1 `input_tensor` as a token. The window size used for each token will be
randomly selected from the range specified by `[min_skips, max_skips]`,
inclusive. See https://arxiv.org/abs/1301.3781 for more details about
skip-gram.
For example, given `input_tensor = ["the", "quick", "brown", "fox", "jumps"]`,
`min_skips = 1`, `max_skips = 2`, `emit_self_as_target = False`, the output
`(tokens, labels)` pairs for the token "quick" will be randomly selected from
either `(tokens=["quick", "quick"], labels=["the", "brown"])` for 1 skip, or
`(tokens=["quick", "quick", "quick"], labels=["the", "brown", "fox"])` for 2
skips.
If `emit_self_as_target = True`, each token will also be emitted as a label
for itself. From the previous example, the output will be either
`(tokens=["quick", "quick", "quick"], labels=["the", "quick", "brown"])` for 1
skip, or `(tokens=["quick", "quick", "quick", "quick"], labels=["the",
"quick", "brown", "fox"])` for 2 skips.
The same process is repeated for each element of `input_tensor` and
concatenated together into the two output rank-1 `Tensors` (one for all the
tokens, another for all the labels).
If `vocab_freq_table` is specified, tokens in `input_tensor` that are not
present in the vocabulary are discarded. Tokens whose frequency counts are
below `vocab_min_count` are also discarded. Tokens whose frequency proportions
in the corpus exceed `vocab_subsampling` may be randomly down-sampled. See
Eq. 5 in http://arxiv.org/abs/1310.4546 for more details about subsampling.
Due to the random window sizes used for each token, the lengths of the outputs
are non-deterministic, unless `batch_size` is specified to batch the outputs
to always return `Tensors` of length `batch_size`.
Args:
input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates.
min_skips: `int` or scalar `Tensor` specifying the minimum window size to
randomly use for each token. Must be >= 0 and <= `max_skips`. If
`min_skips` and `max_skips` are both 0, the only label outputted will be
the token itself when `emit_self_as_target = True` - or no output
otherwise.
max_skips: `int` or scalar `Tensor` specifying the maximum window size to
randomly use for each token. Must be >= 0.
start: `int` or scalar `Tensor` specifying the position in
`input_tensor` from which to start generating skip-gram candidates.
limit: `int` or scalar `Tensor` specifying the maximum number of
elements in `input_tensor` to use in generating skip-gram candidates. -1
means to use the rest of the `Tensor` after `start`.
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit
each token as a label for itself.
vocab_freq_table: (Optional) A lookup table (subclass of
`lookup.InitializableLookupTableBase`) that maps tokens to their raw
frequency counts. If specified, any token in `input_tensor` that is not
found in `vocab_freq_table` will be filtered out before generating
skip-gram candidates. While this will typically map to integer raw
frequency counts, it could also map to float frequency proportions.
`vocab_min_count` and `corpus_size` should be in the same units as this.
vocab_min_count: (Optional) `int`, `float`, or scalar `Tensor` specifying
minimum frequency threshold (from `vocab_freq_table`) for a token to be
kept in `input_tensor`. If this is specified, `vocab_freq_table` must also
be specified - and they should both be in the same units.
vocab_subsampling: (Optional) `float` specifying frequency proportion
threshold for tokens from `input_tensor`. Tokens that occur more
frequently (based on the ratio of the token's `vocab_freq_table` value to
the `corpus_size`) will be randomly down-sampled. Reasonable starting
values may be around 1e-3 or 1e-5. If this is specified, both
`vocab_freq_table` and `corpus_size` must also be specified. See Eq. 5
in http://arxiv.org/abs/1310.4546 for more details.
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the
total number of tokens in the corpus (e.g., sum of all the frequency
counts of `vocab_freq_table`). Used with `vocab_subsampling` for
down-sampling frequently occurring tokens. If this is specified,
`vocab_freq_table` and `vocab_subsampling` must also be specified.
batch_size: (Optional) `int` specifying batch size of returned `Tensors`.
batch_capacity: (Optional) `int` specifying batch capacity for the queue
used for batching returned `Tensors`. Only has an effect if
`batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
seed: (Optional) `int` used to create a random seed for window size and
subsampling. See `set_random_seed` docs for behavior.
name: (Optional) A `string` name or a name scope for the operations.
Returns:
A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
length `batch_size`; if `batch_size` is not specified, they will be of
random length, though they will be in sync with each other as long as they
are evaluated together.
Raises:
ValueError: If `vocab_freq_table` is not provided, but `vocab_min_count`,
`vocab_subsampling`, or `corpus_size` is specified. If `vocab_subsampling`
and `corpus_size` are not both present or both absent.
"""
if vocab_freq_table is None and (vocab_min_count is not None or
vocab_subsampling is not None or
corpus_size is not None):
raise ValueError(
"vocab_freq_table is not provided, but vocab_min_count={}, "
"vocab_subsampling={}, or corpus_size={} is not None. These settings "
"are useless without a vocab_freq_table.".format(
vocab_min_count, vocab_subsampling, corpus_size))
if (vocab_subsampling is None) != (corpus_size is None):
raise ValueError(
"vocab_subsampling is {} while corpus_size is {} - both must be "
"provided in order for subsampling to work.".format(
vocab_subsampling, corpus_size))
with ops.name_scope(
name,
"skip_gram_sample",
values=[input_tensor, min_skips, max_skips, start, limit]):
input_tensor = _filter_input(
input_tensor=input_tensor,
vocab_freq_table=vocab_freq_table,
vocab_min_count=vocab_min_count,
vocab_subsampling=vocab_subsampling,
corpus_size=corpus_size,
seed=seed)
seed1, seed2 = random_seed.get_seed(seed)
tokens, labels = gen_skip_gram_ops.skip_gram_generate_candidates(
input_tensor=input_tensor,
min_skips=min_skips,
max_skips=max_skips,
start=start,
limit=limit,
emit_self_as_target=emit_self_as_target,
# Note that seed here should be seed1! This is due to
# GuardedPhiloxRandom's hard-coded attributes of "seed" and "seed2".
seed=seed1,
seed2=seed2)
# TODO(weiho): If the need arises, add support for sparse input_tensor that
# figures out sentence boundaries, then calls
# skip_gram_generate_candidates() on each sentence.
# Batches the (tokens, labels) outputs so that they will be of deterministic
# batch_size, to facilitate feeding them into the rest of the network.
if batch_size is not None and batch_size > 0:
batch_capacity = (batch_capacity
if (batch_capacity is not None and batch_capacity > 0)
else 100 * batch_size)
return input_ops.batch(
[tokens, labels],
batch_size,
capacity=batch_capacity,
enqueue_many=True)
return tokens, labels
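# A minimal usage sketch (assumes the usual TensorFlow graph/session setup;
# the underlying op converts a plain Python list to a rank-1 tensor):
#
#   tokens, labels = skip_gram_sample(
#       ["the", "quick", "brown", "fox", "jumps"], min_skips=1, max_skips=2)
#   # Both outputs are rank-1 string tensors of equal (random) length.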
def skip_gram_sample_with_text_vocab(input_tensor,
vocab_freq_file,
vocab_token_index=0,
vocab_token_dtype=dtypes.string,
vocab_freq_index=1,
vocab_freq_dtype=dtypes.float64,
vocab_delimiter=",",
vocab_min_count=0,
vocab_subsampling=None,
corpus_size=None,
min_skips=1,
max_skips=5,
start=0,
limit=-1,
emit_self_as_target=False,
batch_size=None,
batch_capacity=None,
seed=None,
name=None):
"""Skip-gram sampling with a text vocabulary file.
Wrapper around `skip_gram_sample()` for use with a text vocabulary file. The
vocabulary file is expected to be a plain-text file, with lines of
`vocab_delimiter`-separated columns. The `vocab_token_index` column should
contain the vocabulary term, while the `vocab_freq_index` column should
contain the number of times that term occurs in the corpus. For example, with
a text vocabulary file of:
```
bonjour,fr,42
hello,en,777
hola,es,99
```
You should set `vocab_delimiter=","`, `vocab_token_index=0`, and
`vocab_freq_index=2`.
See `skip_gram_sample()` documentation for more details about the skip-gram
sampling process.
Args:
input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates.
vocab_freq_file: `string` specifying full file path to the text vocab file.
vocab_token_index: `int` specifying which column in the text vocab file
contains the tokens.
vocab_token_dtype: `DType` specifying the format of the tokens in the text
vocab file.
vocab_freq_index: `int` specifying which column in the text vocab file
contains the frequency counts of the tokens.
vocab_freq_dtype: `DType` specifying the format of the frequency counts in
the text vocab file.
vocab_delimiter: `string` specifying the delimiter used in the text vocab
file.
vocab_min_count: `int`, `float`, or scalar `Tensor` specifying
minimum frequency threshold (from `vocab_freq_file`) for a token to be
kept in `input_tensor`. This should correspond with `vocab_freq_dtype`.
vocab_subsampling: (Optional) `float` specifying frequency proportion
threshold for tokens from `input_tensor`. Tokens that occur more
frequently will be randomly down-sampled. Reasonable starting values may
be around 1e-3 or 1e-5. See Eq. 5 in http://arxiv.org/abs/1310.4546 for
more details.
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the
total number of tokens in the corpus (e.g., sum of all the frequency
counts of `vocab_freq_file`). Used with `vocab_subsampling` for
down-sampling frequently occurring tokens. If this is specified,
`vocab_freq_file` and `vocab_subsampling` must also be specified.
If `corpus_size` is needed but not supplied, then it will be calculated
from `vocab_freq_file`. You might want to supply your own value if you
have already eliminated infrequent tokens from your vocabulary files
(where frequency < vocab_min_count) to save memory in the internal token
lookup table. Otherwise, the unused tokens' variables will waste memory.
The user-supplied `corpus_size` value must be greater than or equal to the
sum of all the frequency counts of `vocab_freq_file`.
min_skips: `int` or scalar `Tensor` specifying the minimum window size to
randomly use for each token. Must be >= 0 and <= `max_skips`. If
`min_skips` and `max_skips` are both 0, the only label outputted will be
the token itself.
max_skips: `int` or scalar `Tensor` specifying the maximum window size to
randomly use for each token. Must be >= 0.
start: `int` or scalar `Tensor` specifying the position in `input_tensor`
from which to start generating skip-gram candidates.
limit: `int` or scalar `Tensor` specifying the maximum number of elements in
`input_tensor` to use in generating skip-gram candidates. -1 means to use
the rest of the `Tensor` after `start`.
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit
each token as a label for itself.
batch_size: (Optional) `int` specifying batch size of returned `Tensors`.
batch_capacity: (Optional) `int` specifying batch capacity for the queue
used for batching returned `Tensors`. Only has an effect if
`batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
seed: (Optional) `int` used to create a random seed for window size and
subsampling. See
[`set_random_seed`](../../g3doc/python/constant_op.md#set_random_seed)
for behavior.
name: (Optional) A `string` name or a name scope for the operations.
Returns:
A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
length `batch_size`; if `batch_size` is not specified, they will be of
random length, though they will be in sync with each other as long as they
are evaluated together.
Raises:
ValueError: If `vocab_token_index` or `vocab_freq_index` is less than 0 or
exceeds the number of columns in `vocab_freq_file`. If `vocab_token_index`
and `vocab_freq_index` are both set to the same column. If any token in
`vocab_freq_file` has a negative frequency.
"""
if vocab_token_index < 0 or vocab_freq_index < 0:
raise ValueError(
"vocab_token_index={} and vocab_freq_index={} must both be >= 0.".
format(vocab_token_index, vocab_freq_index))
if vocab_token_index == vocab_freq_index:
raise ValueError(
"vocab_token_index and vocab_freq_index should be different, but are "
"both {}.".format(vocab_token_index))
# Iterates through the vocab file and calculates the number of vocab terms as
# well as the total corpus size (by summing the frequency counts of all the
# vocab terms).
calculated_corpus_size = 0.0
vocab_size = 0
with gfile.GFile(vocab_freq_file, mode="r") as f:
reader = csv.reader(f, delimiter=vocab_delimiter)
for row in reader:
if vocab_token_index >= len(row) or vocab_freq_index >= len(row):
raise ValueError(
"Row in vocab file only has {} columns, so vocab_token_index={} or "
"vocab_freq_index={} is out of bounds. Row content: {}".format(
len(row), vocab_token_index, vocab_freq_index, row))
vocab_size += 1
freq = vocab_freq_dtype.as_numpy_dtype(row[vocab_freq_index])
if freq < 0:
raise ValueError(
"Row in vocab file has negative frequency of {}. Row content: {}".
format(freq, row))
# Note: tokens whose frequencies are below vocab_min_count will still
# contribute to the total corpus size used for vocab subsampling.
calculated_corpus_size += freq
if not corpus_size:
corpus_size = calculated_corpus_size
elif calculated_corpus_size - corpus_size > 1e-6:
raise ValueError(
"`corpus_size`={} must be greater than or equal to the sum of all the "
"frequency counts ({}) of `vocab_freq_file` ({}).".format(
corpus_size, calculated_corpus_size, vocab_freq_file))
vocab_freq_table = lookup.HashTable(
lookup.TextFileInitializer(
filename=vocab_freq_file,
key_dtype=vocab_token_dtype,
key_index=vocab_token_index,
value_dtype=vocab_freq_dtype,
value_index=vocab_freq_index,
vocab_size=vocab_size,
delimiter=vocab_delimiter),
# For vocab terms not in vocab file, use a default value of -1.
default_value=-1)
return skip_gram_sample(
input_tensor,
min_skips=min_skips,
max_skips=max_skips,
start=start,
limit=limit,
emit_self_as_target=emit_self_as_target,
vocab_freq_table=vocab_freq_table,
vocab_min_count=vocab_min_count,
vocab_subsampling=vocab_subsampling,
# corpus_size is not used unless vocab_subsampling is specified.
corpus_size=None if vocab_subsampling is None else corpus_size,
batch_size=batch_size,
batch_capacity=batch_capacity,
seed=seed,
name=name)
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
vocab_subsampling, corpus_size, seed):
"""Filters input tensor based on vocab freq, threshold, and subsampling."""
if vocab_freq_table is None:
return input_tensor
if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
raise ValueError(
"vocab_freq_table must be a subclass of "
"InitializableLookupTableBase (such as HashTable) instead of type "
"{}.".format(type(vocab_freq_table)))
with ops.name_scope(
"filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]):
freq = vocab_freq_table.lookup(input_tensor)
# Filters out elements in input_tensor that are not found in
# vocab_freq_table (table returns a default value of -1 specified above when
# an element is not found).
mask = math_ops.not_equal(freq, vocab_freq_table.default_value)
# Filters out elements whose vocab frequencies are less than the threshold.
if vocab_min_count is not None:
cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
mask = math_ops.logical_and(mask,
math_ops.greater_equal(freq, cast_threshold))
input_tensor = array_ops.boolean_mask(input_tensor, mask)
freq = array_ops.boolean_mask(freq, mask)
if not vocab_subsampling:
return input_tensor
if vocab_subsampling < 0 or vocab_subsampling > 1:
raise ValueError(
"Invalid vocab_subsampling={} - it should be within range [0, 1].".
format(vocab_subsampling))
# Subsamples the input tokens based on vocabulary frequency and
# vocab_subsampling threshold (ie randomly discard commonly appearing
# tokens).
with ops.name_scope(
"subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
corpus_size = math_ops.cast(corpus_size, dtypes.float64)
freq = math_ops.cast(freq, dtypes.float64)
vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)
    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
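    # Writing f = freq / corpus_size and t = vocab_subsampling, this computes
    #   keep_prob = (sqrt(f / t) + 1) * (t / f)
    # which is >= 1 (token always kept) whenever f <= t, and decays toward 0
    # as a token's frequency proportion grows beyond the threshold.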
keep_prob = ((math_ops.sqrt(freq /
(vocab_subsampling * corpus_size)) + 1.0) *
(vocab_subsampling * corpus_size / freq))
random_prob = random_ops.random_uniform(
array_ops.shape(freq),
minval=0,
maxval=1,
dtype=dtypes.float64,
seed=seed)
mask = math_ops.less_equal(random_prob, keep_prob)
return array_ops.boolean_mask(input_tensor, mask)
| apache-2.0 | -1,664,382,466,397,084,200 | 47.124722 | 80 | 0.657997 | false |
epfl-idevelop/jahia2wp | src/parser/sitemap_node.py | 1 | 1306 | from anytree import Node, RenderTree
class SitemapNode(Node):
"""
A SitemapNode represents a node of the sitemap. The root node
(the homepage) is available as a property of the Site class,
e.g. site.sitemaps["en"] for the English sitemap. This class
is an extension of Node, from the anytree library:
https://pypi.python.org/pypi/anytree/1.0.1
A SitemapNode can reference two types of pages:
1. Internal pages, in which case the "page" property is the Page itself and the
"ref" property is the Page's UUID.
2. External pages, in which case the "page" property is None and the
"ref" property is the external URL, e.g. https://www.google.com.
"""
def __init__(self, name, page, ref, parent=None):
super().__init__(name, parent)
self.page = page
self.ref = ref
def print_node(self):
"""Print the node"""
for pre, fill, node in RenderTree(self):
print("%s%s" % (pre, node.name))
@classmethod
def from_navigation_page(cls, navigation_page, parent):
"""Create a SitemapNode from a NavigationPage"""
return SitemapNode(
name=navigation_page.title,
page=navigation_page.page,
ref=navigation_page.ref,
parent=parent)
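# A small usage sketch (``home_page`` and its ``uuid`` attribute are
# hypothetical stand-ins for a real Page object):
#
#   root = SitemapNode(name="Home", page=home_page, ref=home_page.uuid)
#   SitemapNode(name="Google", page=None, ref="https://www.google.com",
#               parent=root)
#   root.print_node()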
| mit | -6,438,769,800,397,549,000 | 30.095238 | 83 | 0.630168 | false |
Vimos/scikit-learn | sklearn/preprocessing/_function_transformer.py | 41 | 3475 | from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
def _identity(X):
"""The identity function.
"""
return X
class FunctionTransformer(BaseEstimator, TransformerMixin):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
A FunctionTransformer will not do any checks on its function's output.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <function_transformer>`.
Parameters
----------
func : callable, optional default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
inverse_func : callable, optional default=None
The callable to use for the inverse transformation. This will be
passed the same arguments as inverse transform, with args and
kwargs forwarded. If inverse_func is None, then inverse_func
will be the identity function.
validate : bool, optional default=True
Indicate that the input X array should be checked before calling
func. If validate is false, there will be no input validation.
If it is true, then X will be converted to a 2-dimensional NumPy
array or sparse matrix. If this conversion is not possible or X
contains NaN or infinity, an exception is raised.
accept_sparse : boolean, optional
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
pass_y : bool, optional default=False
Indicate that transform should forward the y argument to the
inner callable.
kw_args : dict, optional
Dictionary of additional keyword arguments to pass to func.
inv_kw_args : dict, optional
Dictionary of additional keyword arguments to pass to inverse_func.
"""
def __init__(self, func=None, inverse_func=None, validate=True,
accept_sparse=False, pass_y=False,
kw_args=None, inv_kw_args=None):
self.func = func
self.inverse_func = inverse_func
self.validate = validate
self.accept_sparse = accept_sparse
self.pass_y = pass_y
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
def fit(self, X, y=None):
if self.validate:
check_array(X, self.accept_sparse)
return self
def transform(self, X, y=None):
return self._transform(X, y, self.func, self.kw_args)
def inverse_transform(self, X, y=None):
return self._transform(X, y, self.inverse_func, self.inv_kw_args)
def _transform(self, X, y=None, func=None, kw_args=None):
if self.validate:
X = check_array(X, self.accept_sparse)
if func is None:
func = _identity
return func(X, *((y,) if self.pass_y else ()),
**(kw_args if kw_args else {}))
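# A minimal usage sketch (assumes numpy is available):
#
#   import numpy as np
#   transformer = FunctionTransformer(np.log1p)
#   transformer.transform(np.array([[0, 1], [2, 3]]))
#   # -> array([[ 0.        ,  0.69314718],
#   #           [ 1.09861229,  1.38629436]])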
| bsd-3-clause | 3,600,143,690,742,221,300 | 35.968085 | 77 | 0.664173 | false |
lfcnassif/MultiContentViewer | release/modules/ext/libreoffice/program/python-core-3.3.0/lib/email/_policybase.py | 94 | 14327 | """Policy framework for the email package.
Allows fine grained feature control of how the package parses and emits data.
"""
import abc
from email import header
from email import charset as _charset
from email.utils import _has_surrogates
__all__ = [
'Policy',
'Compat32',
'compat32',
]
class _PolicyBase:
"""Policy Object basic framework.
This class is useless unless subclassed. A subclass should define
class attributes with defaults for any values that are to be
managed by the Policy object. The constructor will then allow
non-default values to be set for these attributes at instance
creation time. The instance will be callable, taking these same
attributes keyword arguments, and returning a new instance
identical to the called instance except for those values changed
by the keyword arguments. Instances may be added, yielding new
instances with any non-default values from the right hand
operand overriding those in the left hand operand. That is,
A + B == A(<non-default values of B>)
The repr of an instance can be used to reconstruct the object
if and only if the repr of the values can be used to reconstruct
those values.
"""
def __init__(self, **kw):
"""Create new Policy, possibly overriding some defaults.
See class docstring for a list of overridable attributes.
"""
for name, value in kw.items():
if hasattr(self, name):
super(_PolicyBase,self).__setattr__(name, value)
else:
raise TypeError(
"{!r} is an invalid keyword argument for {}".format(
name, self.__class__.__name__))
def __repr__(self):
args = [ "{}={!r}".format(name, value)
for name, value in self.__dict__.items() ]
return "{}({})".format(self.__class__.__name__, ', '.join(args))
def clone(self, **kw):
"""Return a new instance with specified attributes changed.
The new instance has the same attribute values as the current object,
except for the changes passed in as keyword arguments.
"""
newpolicy = self.__class__.__new__(self.__class__)
for attr, value in self.__dict__.items():
object.__setattr__(newpolicy, attr, value)
for attr, value in kw.items():
if not hasattr(self, attr):
raise TypeError(
"{!r} is an invalid keyword argument for {}".format(
attr, self.__class__.__name__))
object.__setattr__(newpolicy, attr, value)
return newpolicy
def __setattr__(self, name, value):
if hasattr(self, name):
msg = "{!r} object attribute {!r} is read-only"
else:
msg = "{!r} object has no attribute {!r}"
raise AttributeError(msg.format(self.__class__.__name__, name))
def __add__(self, other):
"""Non-default values from right operand override those from left.
The object returned is a new instance of the subclass.
"""
return self.clone(**other.__dict__)
def _append_doc(doc, added_doc):
doc = doc.rsplit('\n', 1)[0]
added_doc = added_doc.split('\n', 1)[1]
return doc + '\n' + added_doc
def _extend_docstrings(cls):
if cls.__doc__ and cls.__doc__.startswith('+'):
cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
for name, attr in cls.__dict__.items():
if attr.__doc__ and attr.__doc__.startswith('+'):
for c in (c for base in cls.__bases__ for c in base.mro()):
doc = getattr(getattr(c, name), '__doc__')
if doc:
attr.__doc__ = _append_doc(doc, attr.__doc__)
break
return cls
class Policy(_PolicyBase, metaclass=abc.ABCMeta):
r"""Controls for how messages are interpreted and formatted.
Most of the classes and many of the methods in the email package accept
Policy objects as parameters. A Policy object contains a set of values and
functions that control how input is interpreted and how output is rendered.
For example, the parameter 'raise_on_defect' controls whether or not an RFC
violation results in an error being raised or not, while 'max_line_length'
controls the maximum length of output lines when a Message is serialized.
Any valid attribute may be overridden when a Policy is created by passing
it as a keyword argument to the constructor. Policy objects are immutable,
but a new Policy object can be created with only certain values changed by
calling the Policy instance with keyword arguments. Policy objects can
also be added, producing a new Policy object in which the non-default
attributes set in the right hand operand overwrite those specified in the
left operand.
Settable attributes:
raise_on_defect -- If true, then defects should be raised as errors.
Default: False.
linesep -- string containing the value to use as separation
between output lines. Default '\n'.
cte_type -- Type of allowed content transfer encodings
7bit -- ASCII only
8bit -- Content-Transfer-Encoding: 8bit is allowed
Default: 8bit. Also controls the disposition of
(RFC invalid) binary data in headers; see the
documentation of the binary_fold method.
max_line_length -- maximum length of lines, excluding 'linesep',
during serialization. None or 0 means no line
wrapping is done. Default is 78.
"""
raise_on_defect = False
linesep = '\n'
cte_type = '8bit'
max_line_length = 78
def handle_defect(self, obj, defect):
"""Based on policy, either raise defect or call register_defect.
handle_defect(obj, defect)
defect should be a Defect subclass, but in any case must be an
Exception subclass. obj is the object on which the defect should be
registered if it is not raised. If the raise_on_defect is True, the
defect is raised as an error, otherwise the object and the defect are
passed to register_defect.
This method is intended to be called by parsers that discover defects.
The email package parsers always call it with Defect instances.
"""
if self.raise_on_defect:
raise defect
self.register_defect(obj, defect)
def register_defect(self, obj, defect):
"""Record 'defect' on 'obj'.
Called by handle_defect if raise_on_defect is False. This method is
part of the Policy API so that Policy subclasses can implement custom
defect handling. The default implementation calls the append method of
the defects attribute of obj. The objects used by the email package by
default that get passed to this method will always have a defects
attribute with an append method.
"""
obj.defects.append(defect)
def header_max_count(self, name):
"""Return the maximum allowed number of headers named 'name'.
Called when a header is added to a Message object. If the returned
value is not 0 or None, and there are already a number of headers with
the name 'name' equal to the value returned, a ValueError is raised.
Because the default behavior of Message's __setitem__ is to append the
value to the list of headers, it is easy to create duplicate headers
without realizing it. This method allows certain headers to be limited
in the number of instances of that header that may be added to a
Message programmatically. (The limit is not observed by the parser,
which will faithfully produce as many headers as exist in the message
being parsed.)
The default implementation returns None for all header names.
"""
return None
@abc.abstractmethod
def header_source_parse(self, sourcelines):
"""Given a list of linesep terminated strings constituting the lines of
a single header, return the (name, value) tuple that should be stored
in the model. The input lines should retain their terminating linesep
characters. The lines passed in by the email package may contain
surrogateescaped binary data.
"""
raise NotImplementedError
@abc.abstractmethod
def header_store_parse(self, name, value):
"""Given the header name and the value provided by the application
program, return the (name, value) that should be stored in the model.
"""
raise NotImplementedError
@abc.abstractmethod
def header_fetch_parse(self, name, value):
"""Given the header name and the value from the model, return the value
to be returned to the application program that is requesting that
header. The value passed in by the email package may contain
surrogateescaped binary data if the lines were parsed by a BytesParser.
The returned value should not contain any surrogateescaped data.
"""
raise NotImplementedError
@abc.abstractmethod
def fold(self, name, value):
"""Given the header name and the value from the model, return a string
containing linesep characters that implement the folding of the header
according to the policy controls. The value passed in by the email
package may contain surrogateescaped binary data if the lines were
parsed by a BytesParser. The returned value should not contain any
surrogateescaped data.
"""
raise NotImplementedError
@abc.abstractmethod
def fold_binary(self, name, value):
"""Given the header name and the value from the model, return binary
data containing linesep characters that implement the folding of the
header according to the policy controls. The value passed in by the
email package may contain surrogateescaped binary data.
"""
raise NotImplementedError
@_extend_docstrings
class Compat32(Policy):
"""+
This particular policy is the backward compatibility Policy. It
replicates the behavior of the email package version 5.1.
"""
def _sanitize_header(self, name, value):
# If the header value contains surrogates, return a Header using
# the unknown-8bit charset to encode the bytes as encoded words.
if not isinstance(value, str):
# Assume it is already a header object
return value
if _has_surrogates(value):
return header.Header(value, charset=_charset.UNKNOWN8BIT,
header_name=name)
else:
return value
def header_source_parse(self, sourcelines):
"""+
The name is parsed as everything up to the ':' and returned unmodified.
The value is determined by stripping leading whitespace off the
remainder of the first line, joining all subsequent lines together, and
stripping any trailing carriage return or linefeed characters.
"""
name, value = sourcelines[0].split(':', 1)
value = value.lstrip(' \t') + ''.join(sourcelines[1:])
return (name, value.rstrip('\r\n'))
def header_store_parse(self, name, value):
"""+
The name and value are returned unmodified.
"""
return (name, value)
def header_fetch_parse(self, name, value):
"""+
If the value contains binary data, it is converted into a Header object
using the unknown-8bit charset. Otherwise it is returned unmodified.
"""
return self._sanitize_header(name, value)
def fold(self, name, value):
"""+
Headers are folded using the Header folding algorithm, which preserves
existing line breaks in the value, and wraps each resulting line to the
max_line_length. Non-ASCII binary data are CTE encoded using the
unknown-8bit charset.
"""
return self._fold(name, value, sanitize=True)
def fold_binary(self, name, value):
"""+
Headers are folded using the Header folding algorithm, which preserves
existing line breaks in the value, and wraps each resulting line to the
max_line_length. If cte_type is 7bit, non-ascii binary data is CTE
encoded using the unknown-8bit charset. Otherwise the original source
header is used, with its existing line breaks and/or binary data.
"""
folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
return folded.encode('ascii', 'surrogateescape')
def _fold(self, name, value, sanitize):
parts = []
parts.append('%s: ' % name)
if isinstance(value, str):
if _has_surrogates(value):
if sanitize:
h = header.Header(value,
charset=_charset.UNKNOWN8BIT,
header_name=name)
else:
# If we have raw 8bit data in a byte string, we have no idea
# what the encoding is. There is no safe way to split this
# string. If it's ascii-subset, then we could do a normal
# ascii split, but if it's multibyte then we could break the
# string. There's no way to know so the least harm seems to
# be to not split the string and risk it being too long.
parts.append(value)
h = None
else:
h = header.Header(value, header_name=name)
else:
# Assume it is a Header-like object.
h = value
if h is not None:
parts.append(h.encode(linesep=self.linesep,
maxlinelen=self.max_line_length))
parts.append(self.linesep)
return ''.join(parts)
compat32 = Compat32()
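# A small illustration of policy composition (a sketch): clone() and '+'
# never mutate; they return new instances with the overrides applied.
#
#   strict = compat32.clone(raise_on_defect=True)
#   windows = compat32.clone(linesep='\r\n')
#   combined = strict + windows  # raise_on_defect=True and linesep='\r\n'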
| lgpl-3.0 | 405,503,029,564,227,460 | 39.019553 | 80 | 0.62281 | false |
MSFTOSSMgmt/WPSDSCLinux | Providers/Scripts/3.x/Scripts/nxFileInventory.py | 2 | 14839 | #!/usr/bin/env python
# ====================================
# Copyright (c) Microsoft Corporation.
# All rights reserved.
# See license.txt for license information.
# ====================================
from __future__ import print_function
from __future__ import with_statement
from contextlib import contextmanager
import os
import pwd
import grp
import codecs
import fnmatch
import copy
import imp
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
try:
import hashlib
md5const = hashlib.md5
shaconst = hashlib.sha256
except ImportError:  # Python 2.4 has no hashlib; only md5 and sha-1 are available there, so 'sha-256' degrades to sha-1.
import md5
md5const = md5.md5
import sha
shaconst = sha.sha
# [ClassVersion("1.0.0"), Description("The configuration provider for files and directories."), FriendlyName("nxFileInventory")]
# class MSFT_nxFileInventoryResource:OMI_BaseResource
# {
# [Key, InventoryFilter] string DestinationPath;
# [Write, InventoryFilter] boolean Recurse; //default = false
# [Write, InventoryFilter] boolean UseSudo; //default = false
# [Write, ValueMap{"follow", "manage", "ignore" }, Values{"follow", "manage", "ignore"},InventoryFilter] string Links; //default follow
# [Write, ValueMap{"md5", "sha-256", "mtime", "ctime"}, Values{"md5","sha-256","mtime","ctime"},InventoryFilter] string Checksum; //default md5
# [Write, ValueMap{"file", "directory", "*"},Values{"file", "directory","*"}, InventoryFilter] string Type; //default *
# [Write, InventoryFilter] uint32 MaxContentsReturnable; //default 1024 bytes
# [Write, InventoryFilter] uint64 MaxOutputSize; //default 10485760 bytes
# [Read] string Contents;
# [Read] datetime ModifiedDate;
# [Read] datetime CreatedDate;
# [Read] string Mode;
# [Read] string Group;
# [Read] string Owner;
# [Read] uint64 FileSize;
# };
#{'Links': u'ignore', 'MaxOutputSize': None, 'Checksum': u'md5', 'Recurse': None, 'MaxContentsReturnable': None, 'DestinationPath': u'/tmp', 'Type': u'directory'}
def init_locals(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
if DestinationPath is None :
DestinationPath = ''
if Recurse is None :
Recurse = False
if UseSudo is None :
UseSudo = False
if Links is None :
Links = 'follow'
if Checksum is None :
Checksum = 'md5'
if Type is None :
Type = '*'
if MaxContentsReturnable is None or MaxContentsReturnable < 0:
MaxContentsReturnable = 1024
if MaxOutputSize is None or MaxOutputSize < 0:
MaxOutputSize = 10485760
return DestinationPath, Recurse, Links.lower(), \
Checksum.lower(), Type.lower(), \
MaxContentsReturnable, MaxOutputSize, UseSudo
def Set_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
return [0]
def Test_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
return [0]
def Get_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
arg_names = list(locals().keys())
DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo \
= init_locals(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo)
retval = 0
DestinationPath = protocol.MI_String(DestinationPath)
Type = protocol.MI_String(Type)
MaxContentsReturnable = protocol.MI_Uint32(MaxContentsReturnable)
MaxOutputSize = protocol.MI_Uint64(MaxOutputSize)
Recurse = protocol.MI_Boolean(Recurse)
UseSudo = protocol.MI_Boolean(UseSudo)
Links = protocol.MI_String(Links)
Checksum = protocol.MI_String(Checksum)
Contents = protocol.MI_String('')
ModifiedDate = protocol.MI_Timestamp.from_time(0)
CreatedDate = protocol.MI_Timestamp.from_time(0)
Mode = protocol.MI_String('')
Group = protocol.MI_String('')
Owner = protocol.MI_String('')
FileSize = protocol.MI_Uint64(0)
arg_names.extend(['Contents', 'ModifiedDate', 'CreatedDate', 'Mode', 'Group', 'Owner', 'FileSize'])
retd = {}
ld = locals()
for k in arg_names :
retd[k] = ld[k]
return retval, retd
def Inventory_Marshall(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo \
= init_locals(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo)
retval = 0
out_size_cur = 158 # xml output header + footer length.
xml_overhead_array_element = 99 # xml output overhead per Inventory array entry.
xml_overhead_param = 102 # xml output overhead per Inventory parameter.
_Inventory = []
Inventory = DoInventory(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo)
for d in Inventory:
if out_size_cur < MaxOutputSize:
out_size_cur += xml_overhead_array_element
for k,v in d.items():
out_size_cur += xml_overhead_param
if 'Date' in k:
                    out_size_cur += len(k) + 25 + 3  # The final date format will be 25 chars, +3 for type tag.
                else:
                    out_size_cur += len(k) + len(repr(v)) - 2  # The repr(v) will add two quotes.
if out_size_cur >= MaxOutputSize:
break
d['DestinationPath'] = protocol.MI_String(d['DestinationPath'])
d['Checksum'] = protocol.MI_String(d['Checksum'])
d['Type'] = protocol.MI_String(d['Type'])
            d['Contents'] = protocol.MI_String(d['Contents'])  # contents were already truncated to MaxContentsReturnable when read
d['ModifiedDate'] = protocol.MI_Timestamp.from_time(d['ModifiedDate'])
d['CreatedDate'] = protocol.MI_Timestamp.from_time(d['CreatedDate'])
d['Mode'] = protocol.MI_String(d['Mode'])
d['Group'] = protocol.MI_String(d['Group'])
d['Owner'] = protocol.MI_String(d['Owner'])
d['FileSize'] = protocol.MI_Uint64(d['FileSize'])
_Inventory.append(d)
_Inventory = protocol.MI_InstanceA(_Inventory)
retd = {}
retd["__Inventory"] = _Inventory
return retval, retd
def DoInventory(DestinationPath, Recurse, Links, Checksum, Type, MaxContentsReturnable, MaxOutputSize, UseSudo):
Inventory = []
full_path = DestinationPath.split('/')
if full_path[-1] == '':
full_path[-1] = '*'
wildcard_path = False
for p in full_path:
if '*' in p or '?' in p:
            wildc_start = full_path.index(p)
wildcard_path = True
break
if wildcard_path:
top = '/' + os.path.join(*full_path[:wildc_start])
    else:
top = '/' + os.path.join(*full_path)
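    # Example: DestinationPath '/var/log/*.log' splits into
    # ['', 'var', 'log', '*.log']; the wildcard is found at index 3, so the
    # walk below is rooted at top = '/var/log'.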
if not os.path.exists(top):
print("Error: Unable to read 'DestinationPath': " + DestinationPath)
LG().Log("ERROR","Unable to read 'DestinationPath': " + DestinationPath)
return Inventory
if not wildcard_path:
if Links == 'ignore' and os.path.islink(top):
return Inventory
        if Type != 'directory' and os.path.isfile(top):  # This is a single file.
d = GetFileInfo(top, Links, MaxContentsReturnable, Checksum)
if 'DestinationPath' in d.keys():
Inventory.append(copy.deepcopy(d))
return Inventory
if '*' not in full_path[-1] and '?' not in full_path[-1]:
full_path.append('*') # It is a directory without the trailing '/', so add it.
dirs = set()
full_path_len = len(full_path)
for dirpath, dirnames, filenames in os.walk(top, followlinks=(Links == 'follow'), topdown=True):
dlen = len(dirpath.split('/'))
if dirpath.split('/')[-1] == '':
dlen -= 1
if wildcard_path and full_path_len >= dlen+1:
do_wildcard = True
        else:
do_wildcard = False
st = os.stat(dirpath)
scandirs = []
if dlen+1 == full_path_len or ( Recurse and dlen >= full_path_len ):
for filename in filenames:
if (dlen+1 == full_path_len or ( Recurse and dlen >= full_path_len )) \
and not fnmatch.fnmatch(filename, full_path[-1]):
continue
if Type != 'directory':
                    d = GetFileInfo(os.path.join(dirpath, filename),
Links, MaxContentsReturnable, Checksum)
if 'DestinationPath' in d.keys():
Inventory.append(copy.deepcopy(d))
for dirname in dirnames:
if not ( Recurse and dlen+1 >= full_path_len ):
if ( do_wildcard and not fnmatch.fnmatch(dirname, full_path[dlen]) ) or \
( not Recurse and dlen > full_path_len ):
continue
                st = os.stat(os.path.join(dirpath, dirname))  # NOTE: os.stat follows symlinks; os.lstat would be needed to stat the link itself
dirkey = st.st_dev, st.st_ino
if dirkey not in dirs:
if Recurse or (not Recurse and dlen+1 < full_path_len) :
dirs.add(dirkey)
scandirs.append(dirname)
if Type != 'file' and ( dlen+1 == full_path_len or ( Recurse and dlen >= full_path_len ) ) :
d = GetDirInfo(os.path.join(dirpath, dirname), st, Checksum, Links)
if 'DestinationPath' in d.keys():
Inventory.append(copy.deepcopy(d))
dirnames[:] = scandirs
return Inventory
def GetFileInfo(fname, Links, MaxContentsReturnable, Checksum):
"""
Return a dictionary of info for file.
If 'Links' == 'follow', no link files will appear here,
those links will be sent to GetDirInfo() as direcroties.
Therefore only LStatFile is used.
If file is link and 'Links' == 'ignore' {} is returned.
"""
fileContentChecksum = "@{{Algoritm={0} Hash={1} Path={2}}}"
d = {}
if fname.endswith("omsadmin.conf"):
return d
if os.path.islink(fname):
d['Type'] = 'link'
else :
d['Type'] = 'file'
if d['Type'] == 'link' and Links == 'ignore':
return {}
stat_info = None
stat_info = LStatFile(fname)
if stat_info is None:
return {}
d['DestinationPath'] = fname
try:
d['Owner'] = pwd.getpwuid(stat_info.st_uid).pw_name
except:
d['Owner'] = str(stat_info.st_uid)
try:
d['Group'] = grp.getgrgid(stat_info.st_gid).gr_name
except:
d['Group'] = str(stat_info.st_gid)
d['Mode'] = str(oct(stat_info.st_mode))[-3:]
d['ModifiedDate'] = int(stat_info.st_mtime)
d['CreatedDate'] = int(stat_info.st_ctime)
d['FileSize'] = stat_info.st_size
# if the file size is 0,
# don't attempt to read the file
if stat_info.st_size == 0:
d['Contents'] = ''
if Checksum == 'md5' or Checksum == 'sha-256':
d['Checksum'] = ""
elif Checksum == "ctime":
d['Checksum']= str(int(stat_info.st_ctime))
else : # Checksum == "mtime":
d['Checksum']= str(int(stat_info.st_mtime))
return d
if Checksum == 'md5' or Checksum == 'sha-256':
try:
fileHash = GetChecksum(fname,Checksum)
d['Checksum'] = fileContentChecksum.format(Checksum.upper(), fileHash.upper(), fname)
except:
d['Checksum'] = "0" # keep the type consistent with the string checksums above
elif Checksum == "ctime":
d['Checksum']= str(int(stat_info.st_ctime))
else : # Checksum == "mtime":
d['Checksum']= str(int(stat_info.st_mtime))
if d['Type'] == 'link' and Links == 'manage' :
d['Contents'] = 'Symlink to ' + os.readlink(fname)
else :
d['Contents'], error = ReadFileLimited(fname,MaxContentsReturnable)
if d['Contents'] is None:
d['Contents'] = ''
return d
def GetDirInfo(dname, stat_info, Checksum, Links):
"""
Return a dictionary of info for directory.
Only if 'Links' == 'follow' will links be
processed here as directories.
"""
d = {}
if stat_info is None:
return d
d['Type'] = 'directory'
d['DestinationPath'] = dname
try:
d['Owner'] = pwd.getpwuid(stat_info.st_uid).pw_name
except:
d['Owner'] = str(stat_info.st_uid)
try:
d['Group'] = grp.getgrgid(stat_info.st_gid).gr_name
except:
d['Group'] = str(stat_info.st_gid)
if Checksum == 'md5' or Checksum == 'sha-256':
d['Checksum'] = '0'
elif Checksum == "ctime":
d['Checksum']= str(int(stat_info.st_ctime))
else : # Checksum == "mtime":
d['Checksum']= str(int(stat_info.st_mtime))
d['Mode'] = str(oct(stat_info.st_mode))[-3:]
d['ModifiedDate'] = int(stat_info.st_mtime)
d['CreatedDate'] = int(stat_info.st_ctime)
d['FileSize'] = stat_info.st_size
d['Contents'] = ''
if Links == 'manage' and os.path.islink(dname):
d['Contents'] = 'Symlink to ' + os.readlink(dname)
return d
@contextmanager
def opened_w_error(filename, mode="r"):
try:
f = codecs.open(filename, encoding='utf8', mode=mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
@contextmanager
def opened_bin_w_error(filename, mode="rb"):
try:
f = open(filename, mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
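def _ExampleReadWithError(path):
    """
    Illustrative sketch, not part of the original resource: shows the
    (file, error) tuple convention used by the context managers above.
    """
    with opened_w_error(path) as (F, error):
        if error:
            return None, error
        return F.read(64), None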
def ReadFileLimited(path, MaxContentsReturnable):
"""
Safely attempt to read a text file,
ensuring file is always closed at exit.
Read up to MaxContentsReturnable.
"""
d = ''
error = None
with opened_w_error(path) as (F, error):
if not error:
try:
d = F.read(MaxContentsReturnable)
except:
F.close()
return d.encode().decode('ascii','ignore'), error
def LStatFile(path):
"""
LStat the file. Do not follow the symlink.
"""
d = None
try:
d = os.lstat(path)
except:
pass
return d
def StatFile(path):
"""
Stat the file, following the symlink.
"""
d = None
try:
d = os.stat(path)
except:
pass
return d
def GetChecksum(fname, Checksum):
src_error = None
src_block = b'loopme'
if Checksum == "md5":
src_hash = md5const()
else : # sha-256
src_hash = shaconst()
with opened_bin_w_error(fname, 'rb') as (src_file, src_error):
if src_error:
return ""
while src_block :
src_block = src_file.read(8192)
src_hash.update(src_block)
return src_hash.hexdigest()
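def _ExampleChecksums(fname):
    """
    Illustrative sketch, not part of the original resource: GetChecksum()
    returns a hex digest, or '' when the file cannot be opened.
    """
    return GetChecksum(fname, 'md5'), GetChecksum(fname, 'sha-256')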
| mit | 8,424,978,946,264,072,000 | 36.28392 | 162 | 0.60004 | false |
gtaylor/python-colormath | examples/delta_e_matrix.py | 1 | 1329 | # -*- coding: utf-8 -*-
"""
For a massive matrix of colors and color labels you can download
the following two files:
# http://lyst-classifiers.s3.amazonaws.com/color/lab-colors.pk
# http://lyst-classifiers.s3.amazonaws.com/color/lab-matrix.pk
lab-colors is a cPickled list of color names and lab-matrix is a
cPickled (n,3) numpy array of LAB values such that row q maps to
index q in the lab color list
"""
import sys
import csv
import bz2
import numpy as np
# Does some sys.path manipulation so we can run examples in-place.
# noinspection PyUnresolvedReferences
import example_config # noqa
from colormath.color_diff_matrix import delta_e_cie2000
from colormath.color_objects import LabColor
# load list of 1000 random colors from the XKCD color chart
if sys.version_info >= (3, 0):
reader = csv.DictReader(bz2.open("lab_matrix.csv.bz2", mode="rt"))
lab_matrix = np.array([list(map(float, row.values())) for row in reader])
else:
reader = csv.DictReader(bz2.BZ2File("lab_matrix.csv.bz2"))
lab_matrix = np.array([map(float, row.values()) for row in reader])
color = LabColor(lab_l=69.34, lab_a=-0.88, lab_b=-52.57)
lab_color_vector = np.array([color.lab_l, color.lab_a, color.lab_b])
delta = delta_e_cie2000(lab_color_vector, lab_matrix)
print("%s is closest to %s" % (color, lab_matrix[np.argmin(delta)]))
| bsd-3-clause | -516,180,357,757,013,440 | 32.225 | 77 | 0.725357 | false |
tima/ansible | lib/ansible/modules/notification/catapult.py | 49 | 4362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Jonathan Mainguy <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# basis of code taken from the ansible twilio and nexmo modules
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: catapult
version_added: 2.4
short_description: Send an sms / mms using the catapult bandwidth api
description:
- Allows notifications to be sent using sms / mms via the catapult bandwidth api.
options:
src:
description:
- One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
required: true
default: null
dest:
description:
- The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
required: true
default: null
msg:
description:
- The contents of the text message (must be 2048 characters or less).
required: true
default: null
media:
description:
- For MMS messages, a media url to the location of the media to be sent with the message.
user_id:
description:
- User Id from Api account page.
required: true
default: null
api_token:
description:
- Api Token from Api account page.
required: true
default: null
api_secret:
description:
- Api Secret from Api account page.
required: true
default: null
author: "Jonathan Mainguy (@Jmainguy)"
notes:
- Will return changed even if the media url is wrong.
- Will return changed if the destination number is invalid.
'''
EXAMPLES = '''
- name: Send an mms to multiple users
catapult:
src: "+15035555555"
dest:
- "+12525089000"
- "+12018994225"
media: "http://example.com/foobar.jpg"
msg: "Task is complete"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
- name: Send an sms to a single user
catapult:
src: "+15035555555"
dest: "+12018994225"
msg: "Consider yourself notified"
user_id: "{{ user_id }}"
api_token: "{{ api_token }}"
api_secret: "{{ api_secret }}"
'''
RETURN = '''
changed:
description: Whether the api accepted the message.
returned: always
type: boolean
sample: True
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def send(module, src, dest, msg, media, user_id, api_token, api_secret):
"""
Send the message
"""
AGENT = "Ansible"
URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
data = {'from': src, 'to': dest, 'text': msg}
if media:
data['media'] = media
headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
# Hack module params to have the Basic auth params that fetch_url expects
module.params['url_username'] = api_token.replace('\n', '')
module.params['url_password'] = api_secret.replace('\n', '')
return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
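# For reference (illustrative values, not taken from the API documentation),
# the JSON body posted above looks like:
#   {"from": "+15035555555", "to": "+12018994225",
#    "text": "Consider yourself notified", "media": "http://example.com/foobar.jpg"}
# where the "media" key is only present for MMS messages.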
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(required=True),
dest=dict(required=True, type='list'),
msg=dict(required=True),
user_id=dict(required=True),
api_token=dict(required=True, no_log=True),
api_secret=dict(required=True, no_log=True),
media=dict(default=None, required=False),
),
)
src = module.params['src']
dest = module.params['dest']
msg = module.params['msg']
media = module.params['media']
user_id = module.params['user_id']
api_token = module.params['api_token']
api_secret = module.params['api_secret']
for number in dest:
rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
if info["status"] != 201:
body = json.loads(info["body"])
fail_msg = body["message"]
module.fail_json(msg=fail_msg)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,566,080,823,899,159,000 | 26.607595 | 124 | 0.626547 | false |
kingvuplus/ts-gui-3 | lib/python/Components/Harddisk.py | 3 | 28983 | import os
import time
from Tools.CList import CList
from Tools.HardwareInfo import HardwareInfo
from SystemInfo import SystemInfo
from Components.Console import Console
import Task
def readFile(filename):
file = open(filename)
data = file.read().strip()
file.close()
return data
def getProcMounts():
try:
mounts = open("/proc/mounts", 'r')
except IOError, ex:
print "[Harddisk] Failed to open /proc/mounts", ex
return []
result = [line.strip().split(' ') for line in mounts]
for item in result:
# Spaces are encoded as \040 in mounts
item[1] = item[1].replace('\\040', ' ')
return result
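# Example of the parsed rows (illustrative): a /proc/mounts line such as
#   /dev/sda1 /media/hdd ext4 rw,relatime 0 0
# becomes ['/dev/sda1', '/media/hdd', 'ext4', 'rw,relatime', '0', '0'],
# with any '\040' escapes in the mount point decoded back to spaces.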
def isFileSystemSupported(filesystem):
try:
for fs in open('/proc/filesystems', 'r'):
if fs.strip().endswith(filesystem):
return True
return False
except Exception, ex:
print "[Harddisk] Failed to read /proc/filesystems:", ex
def findMountPoint(path):
'Example: findMountPoint("/media/hdd/some/file") returns "/media/hdd"'
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
DEVTYPE_UDEV = 0
DEVTYPE_DEVFS = 1
class Harddisk:
def __init__(self, device, removable=False):
self.device = device
if os.access("/dev/.udev", 0):
self.type = DEVTYPE_UDEV
elif os.access("/dev/.devfsd", 0):
self.type = DEVTYPE_DEVFS
else:
print "[Harddisk] Unable to determine structure of /dev"
self.card = False
self.max_idle_time = 0
self.idle_running = False
self.last_access = time.time()
self.last_stat = 0
self.timer = None
self.is_sleeping = False
self.dev_path = ''
self.disk_path = ''
self.mount_path = None
self.mount_device = None
self.phys_path = os.path.realpath(self.sysfsPath('device'))
self.removable = removable
self.internal = "pci" in self.phys_path or "ahci" in self.phys_path
try:
data = open("/sys/block/%s/queue/rotational" % device, "r").read().strip()
self.rotational = int(data)
except:
self.rotational = True
if self.type == DEVTYPE_UDEV:
self.dev_path = '/dev/' + self.device
self.disk_path = self.dev_path
self.card = "sdhci" in self.phys_path
elif self.type == DEVTYPE_DEVFS:
tmp = readFile(self.sysfsPath('dev')).split(':')
s_major = int(tmp[0])
s_minor = int(tmp[1])
for disc in os.listdir("/dev/discs"):
dev_path = os.path.realpath('/dev/discs/' + disc)
disk_path = dev_path + '/disc'
try:
rdev = os.stat(disk_path).st_rdev
except OSError:
continue
if s_major == os.major(rdev) and s_minor == os.minor(rdev):
self.dev_path = dev_path
self.disk_path = disk_path
break
self.card = self.device[:2] == "hd" and "host0" not in self.dev_path
print "[Harddisk] new device", self.device, '->', self.dev_path, '->', self.disk_path
if not removable and not self.card:
self.startIdle()
def __lt__(self, ob):
return self.device < ob.device
def partitionPath(self, n):
if self.type == DEVTYPE_UDEV:
if self.dev_path.startswith('/dev/mmcblk0'):
return self.dev_path + "p" + n
else:
return self.dev_path + n
elif self.type == DEVTYPE_DEVFS:
return self.dev_path + '/part' + n
def sysfsPath(self, filename):
return os.path.join('/sys/block/', self.device, filename)
def stop(self):
if self.timer:
self.timer.stop()
self.timer.callback.remove(self.runIdle)
def bus(self):
ret = _("External")
# SD/MMC(F1 specific)
if self.type == DEVTYPE_UDEV:
type_name = " (SD/MMC)"
# CF(7025 specific)
elif self.type == DEVTYPE_DEVFS:
type_name = " (CF)"
if self.card:
ret += type_name
else:
if self.internal:
ret = _("Internal")
if not self.rotational:
ret += " (SSD)"
return ret
def diskSize(self):
cap = 0
try:
line = readFile(self.sysfsPath('size'))
cap = int(line)
return cap / 1000 * 512 / 1000
except:
dev = self.findMount()
if dev:
try:
stat = os.statvfs(dev)
cap = int(stat.f_blocks * stat.f_bsize)
return cap / 1000 / 1000
except:
pass
return cap
def capacity(self):
cap = self.diskSize()
if cap == 0:
return ""
if cap < 1000:
return "%03d MB" % cap
return "%d.%03d GB" % (cap/1000, cap%1000)
def model(self):
try:
if self.device[:2] == "hd":
return readFile('/proc/ide/' + self.device + '/model')
elif self.device[:2] == "sd":
vendor = readFile(self.sysfsPath('device/vendor'))
model = readFile(self.sysfsPath('device/model'))
return vendor + '(' + model + ')'
elif self.device.startswith('mmcblk0'):
return readFile(self.sysfsPath('device/name'))
else:
raise Exception, "[Harddisk] no hdX or sdX or mmcX"
except Exception, e:
print "[Harddisk] Failed to get model:", e
return "-?-"
def free(self):
dev = self.findMount()
if dev:
stat = os.statvfs(dev)
return (stat.f_bfree/1000) * (stat.f_bsize/1024)
return -1
def numPartitions(self):
numPart = -1
if self.type == DEVTYPE_UDEV:
try:
devdir = os.listdir('/dev')
except OSError:
return -1
for filename in devdir:
if filename.startswith(self.device):
numPart += 1
elif self.type == DEVTYPE_DEVFS:
try:
idedir = os.listdir(self.dev_path)
except OSError:
return -1
for filename in idedir:
if filename.startswith("disc"):
numPart += 1
if filename.startswith("part"):
numPart += 1
return numPart
def mountDevice(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
self.mount_device = parts[0]
self.mount_path = parts[1]
return parts[1]
return None
def enumMountDevices(self):
for parts in getProcMounts():
if os.path.realpath(parts[0]).startswith(self.dev_path):
yield parts[1]
def findMount(self):
if self.mount_path is None:
return self.mountDevice()
return self.mount_path
def unmount(self):
dev = self.mountDevice()
if dev is None:
# not mounted, return OK
return 0
cmd = 'umount ' + dev
print "[Harddisk]", cmd
res = os.system(cmd)
return (res >> 8)
def createPartition(self):
cmd = 'printf "8,\n;0,0\n;0,0\n;0,0\ny\n" | sfdisk -f -uS ' + self.disk_path
res = os.system(cmd)
return (res >> 8)
def mkfs(self):
# No longer supported, use createInitializeJob instead
return 1
def mount(self):
# try mounting through fstab first
if self.mount_device is None:
dev = self.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.mount_device
try:
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
except IOError:
return -1
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if fspath == dev:
print "[Harddisk] mounting:", fspath
cmd = "mount -t auto " + fspath
res = os.system(cmd)
return (res >> 8)
# device is not in fstab
res = -1
if self.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
res = os.system('hdparm -z ' + self.disk_path)
# give udev some time to make the mount, which it will do asynchronously
from time import sleep
sleep(3)
return (res >> 8)
def fsck(self):
# No longer supported, use createCheckJob instead
return 1
def killPartitionTable(self):
zero = 512 * '\0'
h = open(self.dev_path, 'wb')
# delete first 9 sectors, which will likely kill the first partition too
for i in range(9):
h.write(zero)
h.close()
def killPartition(self, n):
zero = 512 * '\0'
part = self.partitionPath(n)
h = open(part, 'wb')
for i in range(3):
h.write(zero)
h.close()
def createInitializeJob(self):
job = Task.Job(_("Initializing storage device..."))
size = self.diskSize()
print "[HD] size: %s MB" % size
task = UnmountTask(job, self)
task = Task.PythonTask(job, _("Removing partition table"))
task.work = self.killPartitionTable
task.weighting = 1
task = Task.LoggingTask(job, _("Rereading partition table"))
task.weighting = 1
task.setTool('hdparm')
task.args.append('-z')
task.args.append(self.disk_path)
task = Task.ConditionTask(job, _("Waiting for partition"), timeoutCount=20)
task.check = lambda: not os.path.exists(self.partitionPath("1"))
task.weighting = 1
if os.path.exists('/usr/sbin/parted'):
use_parted = True
else:
if size > 2097151:
addInstallTask(job, 'parted')
use_parted = True
else:
use_parted = False
task = Task.LoggingTask(job, _("Creating partition"))
task.weighting = 5
if use_parted:
task.setTool('parted')
if size < 1024:
# On very small devices, align to block only
alignment = 'min'
else:
# Prefer optimal alignment for performance
alignment = 'opt'
if size > 2097151:
parttype = 'gpt'
else:
parttype = 'msdos'
task.args += ['-a', alignment, '-s', self.disk_path, 'mklabel', parttype, 'mkpart', 'primary', '0%', '100%']
else:
task.setTool('sfdisk')
task.args.append('-f')
task.args.append('-uS')
task.args.append(self.disk_path)
if size > 128000:
# Start at sector 8 to better support 4k aligned disks
print "[HD] Detected >128GB disk, using 4k alignment"
task.initial_input = "8,,L\n;0,0\n;0,0\n;0,0\ny\n"
else:
# Smaller disks (CF cards, sticks etc) don't need that
task.initial_input = ",,L\n;\n;\n;\ny\n"
task = Task.ConditionTask(job, _("Waiting for partition"))
task.check = lambda: os.path.exists(self.partitionPath("1"))
task.weighting = 1
task = MkfsTask(job, _("Creating filesystem"))
big_o_options = ["dir_index"]
if isFileSystemSupported("ext4"):
task.setTool("mkfs.ext4")
if size > 20000:
try:
version = map(int, open("/proc/version","r").read().split(' ', 4)[2].split('.',2)[:2])
if (version[0] > 3) or ((version[0] > 2) and (version[1] >= 2)):
# Linux version 3.2 supports bigalloc and -C option, use 256k blocks
task.args += ["-C", "262144"]
big_o_options.append("bigalloc")
except Exception, ex:
print "Failed to detect Linux version:", ex
else:
task.setTool("mkfs.ext3")
if size > 250000:
# No more than 256k i-nodes (prevent problems with fsck memory requirements)
task.args += ["-T", "largefile", "-N", "262144"]
big_o_options.append("sparse_super")
elif size > 16384:
# between 16GB and 250GB: 1 i-node per megabyte
task.args += ["-T", "largefile"]
big_o_options.append("sparse_super")
elif size > 2048:
# Over 2GB: 32 i-nodes per megabyte
task.args += ["-T", "largefile", "-N", str(size * 32)]
task.args += ["-m0", "-O", ",".join(big_o_options), self.partitionPath("1")]
task = MountTask(job, self)
task.weighting = 3
task = Task.ConditionTask(job, _("Waiting for mount"), timeoutCount=20)
task.check = self.mountDevice
task.weighting = 1
return job
def initialize(self):
# no longer supported
return -5
def check(self):
# no longer supported
return -5
def createCheckJob(self):
job = Task.Job(_("Checking filesystem..."))
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-f')
task.args.append('-p')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def createExt4ConversionJob(self):
if not isFileSystemSupported('ext4'):
raise Exception, _("You system does not support ext4")
job = Task.Job(_("Converting ext3 to ext4..."))
if not os.path.exists('/sbin/tune2fs'):
addInstallTask(job, 'e2fsprogs-tune2fs')
if self.findMount():
# Create unmount task if it was not mounted
UnmountTask(job, self)
dev = self.mount_device
else:
# otherwise, assume there is one partition
dev = self.partitionPath("1")
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext3')
task.args.append('-p')
task.args.append(dev)
task = Task.LoggingTask(job, "tune2fs")
task.setTool('tune2fs')
task.args.append('-O')
task.args.append('extents,uninit_bg,dir_index')
task.args.append('-o')
task.args.append('journal_data_writeback')
task.args.append(dev)
task = Task.LoggingTask(job, "fsck")
task.setTool('fsck.ext4')
task.postconditions = [] # ignore result, it will always "fail"
task.args.append('-f')
task.args.append('-p')
task.args.append('-D')
task.args.append(dev)
MountTask(job, self)
task = Task.ConditionTask(job, _("Waiting for mount"))
task.check = self.mountDevice
return job
def getDeviceDir(self):
return self.dev_path
def getDeviceName(self):
return self.disk_path
# the HDD idle poll daemon.
# as some harddrives have a buggy standby timer, we are doing this by hand here.
# first, we disable the hardware timer. then, we check every now and then if
# any access has been made to the disc. If there has been no access over a specified time,
# we set the hdd into standby.
def readStats(self):
try:
l = open("/sys/block/%s/stat" % self.device).read()
except IOError:
return -1,-1
data = l.split(None,5)
return (int(data[0]), int(data[4]))
def startIdle(self):
from enigma import eTimer
# disable HDD standby timer
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--set=SCT=0", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-S0", self.disk_path))
self.timer = eTimer()
self.timer.callback.append(self.runIdle)
self.idle_running = True
self.setIdleTime(self.max_idle_time) # kick the idle polling loop
def runIdle(self):
if not self.max_idle_time:
return
t = time.time()
idle_time = t - self.last_access
stats = self.readStats()
l = sum(stats)
if l != self.last_stat and l >= 0: # access
self.last_stat = l
self.last_access = t
idle_time = 0
self.is_sleeping = False
if idle_time >= self.max_idle_time and not self.is_sleeping:
self.setSleep()
self.is_sleeping = True
def setSleep(self):
if self.bus() == _("External"):
Console().ePopen(("sdparm", "sdparm", "--flexible", "--readonly", "--command=stop", self.disk_path))
else:
Console().ePopen(("hdparm", "hdparm", "-y", self.disk_path))
def setIdleTime(self, idle):
self.max_idle_time = idle
if self.idle_running:
if not idle:
self.timer.stop()
else:
self.timer.start(idle * 100, False) # poll 10 times per period.
def isSleeping(self):
return self.is_sleeping
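def _exampleSpindown(hdd, minutes=10):
    # Illustrative sketch, not part of the original module: configure a
    # Harddisk instance to enter standby after the given idle period via the
    # startIdle()/runIdle() polling loop above (setIdleTime takes seconds).
    hdd.setIdleTime(minutes * 60)
    return hdd.isSleeping()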
class Partition:
# for backward compatibility, force_mounted actually means "hotplug"
def __init__(self, mountpoint, device = None, description = "", force_mounted = False):
self.mountpoint = mountpoint
self.description = description
self.force_mounted = mountpoint and force_mounted
self.is_hotplug = force_mounted # so far; this might change.
self.device = device
def __str__(self):
return "Partition(mountpoint=%s,description=%s,device=%s)" % (self.mountpoint,self.description,self.device)
def stat(self):
if self.mountpoint:
return os.statvfs(self.mountpoint)
else:
raise OSError, "Device %s is not mounted" % self.device
def free(self):
try:
s = self.stat()
return s.f_bavail * s.f_bsize
except OSError:
return None
def total(self):
try:
s = self.stat()
return s.f_blocks * s.f_bsize
except OSError:
return None
def tabbedDescription(self):
if self.mountpoint.startswith('/media/net') or self.mountpoint.startswith('/media/autofs'):
# Network devices have a user defined name
return self.description
return self.description + '\t' + self.mountpoint
def mounted(self, mounts = None):
# THANK YOU PYTHON FOR STRIPPING AWAY f_fsid.
# TODO: can os.path.ismount be used?
if self.force_mounted:
return True
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for parts in mounts:
if self.mountpoint.startswith(parts[1]): # use startswith so a mount not ending with '/' is also detected.
return True
return False
def filesystem(self, mounts = None):
if self.mountpoint:
if mounts is None:
mounts = getProcMounts()
for fields in mounts:
if self.mountpoint.endswith('/') and not self.mountpoint == '/':
if fields[1] + '/' == self.mountpoint:
return fields[2]
else:
if fields[1] == self.mountpoint:
return fields[2]
return ''
DEVICEDB = \
{"dm8000":
{
# dm8000:
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.1/1-1.1:1.0": "Front USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.2/1-1.2:1.0": "Back, upper USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1.3/1-1.3:1.0": "Back, lower USB Slot",
"/devices/platform/brcm-ehci-1.1/usb2/2-1/2-1:1.0/host1/target1:0:0/1:0:0:0": "DVD Drive",
},
"dm800":
{
# dm800:
"/devices/platform/brcm-ehci.0/usb1/1-2/1-2:1.0": "Upper USB Slot",
"/devices/platform/brcm-ehci.0/usb1/1-1/1-1:1.0": "Lower USB Slot",
},
"dm7025":
{
# dm7025:
"/devices/pci0000:00/0000:00:14.1/ide1/1.0": "CF Card Slot", #hdc
"/devices/pci0000:00/0000:00:14.1/ide0/0.0": "Internal Harddisk"
}
}
def addInstallTask(job, package):
task = Task.LoggingTask(job, "update packages")
task.setTool('opkg')
task.args.append('update')
task = Task.LoggingTask(job, "Install " + package)
task.setTool('opkg')
task.args.append('install')
task.args.append(package)
class HarddiskManager:
def __init__(self):
self.hdd = [ ]
self.cd = ""
self.partitions = [ ]
self.devices_scanned_on_init = [ ]
self.on_partition_list_change = CList()
self.enumerateBlockDevices()
# Find stuff not detected by the enumeration
p = (
("/media/hdd", _("Hard disk")),
("/media/card", _("Card")),
("/media/cf", _("Compact flash")),
("/media/mmc1", _("MMC card")),
("/media/net", _("Network mount")),
("/media/net1", _("Network mount %s") % ("1")),
("/media/net2", _("Network mount %s") % ("2")),
("/media/net3", _("Network mount %s") % ("3")),
("/media/ram", _("Ram disk")),
("/media/usb", _("USB stick")),
("/", _("Internal flash"))
)
known = set([os.path.normpath(a.mountpoint) for a in self.partitions if a.mountpoint])
for m,d in p:
if (m not in known) and os.path.ismount(m):
self.partitions.append(Partition(mountpoint=m, description=d))
def getBlockDevInfo(self, blockdev):
devpath = "/sys/block/" + blockdev
error = False
removable = False
blacklisted = False
is_cdrom = False
partitions = []
try:
if os.path.exists(devpath + "/removable"):
removable = bool(int(readFile(devpath + "/removable")))
if os.path.exists(devpath + "/dev"):
dev = int(readFile(devpath + "/dev").split(':')[0])
else:
dev = None
if HardwareInfo().get_device_model().startswith('vusolo4k'):
devlist = [1, 7, 31, 253, 254, 179] # ram, loop, mtdblock, romblock, ramzswap, mmc
else:
devlist = [1, 7, 31, 253, 254] # ram, loop, mtdblock, romblock, ramzswap
if dev in devlist:
blacklisted = True
if blockdev[0:2] == 'sr':
is_cdrom = True
if blockdev[0:2] == 'hd':
try:
media = readFile("/proc/ide/%s/media" % blockdev)
if "cdrom" in media:
is_cdrom = True
except IOError:
error = True
# check for partitions
if not is_cdrom and os.path.exists(devpath):
for partition in os.listdir(devpath):
if partition[0:len(blockdev)] != blockdev:
continue
partitions.append(partition)
else:
self.cd = blockdev
except IOError:
error = True
# check for medium
medium_found = True
try:
open("/dev/" + blockdev).close()
except IOError, err:
if err.errno == 159: # no medium present
medium_found = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def enumerateBlockDevices(self):
print "[Harddisk] enumerating block devices..."
for blockdev in os.listdir("/sys/block"):
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.addHotplugPartition(blockdev)
if not error and not blacklisted and medium_found:
for part in partitions:
self.addHotplugPartition(part)
self.devices_scanned_on_init.append((blockdev, removable, is_cdrom, medium_found))
def getAutofsMountpoint(self, device):
r = self.getMountpoint(device)
if r is None:
return "/media/" + device
return r
def getMountpoint(self, device):
dev = "/dev/%s" % device
for item in getProcMounts():
if item[0] == dev:
return item[1]
return None
def addHotplugPartition(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = self.getMountpoint(device), description = description, force_mounted = True, device = device)
self.partitions.append(p)
if p.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("add", p)
# see if this is a harddrive
l = len(device)
if l and (not device[l-1].isdigit() or device == 'mmcblk0'):
self.hdd.append(Harddisk(device, removable))
self.hdd.sort()
SystemInfo["Harddisk"] = True
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def addHotplugAudiocd(self, device, physdev = None):
# device is the device name, without /dev
# physdev is the physical device path, which we (might) use to determine the userfriendly name
if not physdev:
dev, part = self.splitDeviceName(device)
try:
physdev = os.path.realpath('/sys/block/' + dev + '/device')[4:]
except OSError:
physdev = dev
print "couldn't determine blockdev physdev for device", device
error, blacklisted, removable, is_cdrom, partitions, medium_found = self.getBlockDevInfo(device)
if not blacklisted and medium_found:
description = self.getUserfriendlyDeviceName(device, physdev)
p = Partition(mountpoint = "/media/audiocd", description = description, force_mounted = True, device = device)
self.partitions.append(p)
self.on_partition_list_change("add", p)
SystemInfo["Harddisk"] = False
return error, blacklisted, removable, is_cdrom, partitions, medium_found
def removeHotplugPartition(self, device):
for x in self.partitions[:]:
if x.device == device:
self.partitions.remove(x)
if x.mountpoint: # Plugins won't expect unmounted devices
self.on_partition_list_change("remove", x)
l = len(device)
if l and not device[l-1].isdigit():
for hdd in self.hdd:
if hdd.device == device:
hdd.stop()
self.hdd.remove(hdd)
break
SystemInfo["Harddisk"] = len(self.hdd) > 0
def HDDCount(self):
return len(self.hdd)
def HDDList(self):
list = [ ]
for hd in self.hdd:
hdd = hd.model() + " - " + hd.bus()
cap = hd.capacity()
if cap != "":
hdd += " (" + cap + ")"
list.append((hdd, hd))
return list
def getCD(self):
return self.cd
def getMountedPartitions(self, onlyhotplug = False, mounts=None):
if mounts is None:
mounts = getProcMounts()
parts = [x for x in self.partitions if (x.is_hotplug or not onlyhotplug) and x.mounted(mounts)]
devs = set([x.device for x in parts])
for devname in devs.copy():
if not devname:
continue
dev, part = self.splitDeviceName(devname)
if part and dev in devs: # if this is a partition and we still have the wholedisk, remove wholedisk
devs.remove(dev)
# return all devices which are not removed due to being a wholedisk when a partition exists
return [x for x in parts if not x.device or x.device in devs]
def splitDeviceName(self, devname):
# this works for: sdaX, hdaX, sr0 (which is in fact dev="sr0", part=""). It doesn't work for other names like mtdblock3, but they are blacklisted anyway.
dev = devname[:3]
part = devname[3:]
for p in part:
if not p.isdigit():
return devname, 0
return dev, part and int(part) or 0
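# Illustrative results (comment added for clarity, not in the original):
#   splitDeviceName('sda1') -> ('sda', 1)
#   splitDeviceName('sr0')  -> ('sr0', 0)
#   names like 'mmcblk0p1' fall through to (devname, 0).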
def getUserfriendlyDeviceName(self, dev, phys):
dev, part = self.splitDeviceName(dev)
description = _("External Storage %s") % dev
try:
description = readFile("/sys" + phys + "/model")
except IOError, s:
print "couldn't read model: ", s
from Tools.HardwareInfo import HardwareInfo
for physdevprefix, pdescription in DEVICEDB.get(HardwareInfo().device_name,{}).items():
if phys.startswith(physdevprefix):
description = pdescription
# not wholedisk and not partition 1
if part and part != 1:
description += _(" (Partition %d)") % part
return description
def addMountedPartition(self, device, desc):
for x in self.partitions:
if x.mountpoint == device:
#already_mounted
return
self.partitions.append(Partition(mountpoint=device, description=desc))
def removeMountedPartition(self, mountpoint):
for x in self.partitions[:]:
if x.mountpoint == mountpoint:
self.partitions.remove(x)
self.on_partition_list_change("remove", x)
def setDVDSpeed(self, device, speed = 0):
ioctl_flag=int(0x5322)
if not device.startswith('/'):
device = "/dev/" + device
try:
from fcntl import ioctl
cd = open(device)
ioctl(cd.fileno(), ioctl_flag, speed)
cd.close()
except Exception, ex:
print "[Harddisk] Failed to set %s speed to %s" % (device, speed), ex
class UnmountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Unmount"))
self.hdd = hdd
self.mountpoints = []
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
open('/dev/nomount.%s' % dev, "wb").close()
except Exception, e:
print "ERROR: Failed to create /dev/nomount file:", e
self.setTool('umount')
self.args.append('-f')
for dev in self.hdd.enumMountDevices():
self.args.append(dev)
self.postconditions.append(Task.ReturncodePostcondition())
self.mountpoints.append(dev)
if not self.mountpoints:
print "UnmountTask: No mountpoints found?"
self.cmd = 'true'
self.args = [self.cmd]
def afterRun(self):
for path in self.mountpoints:
try:
os.rmdir(path)
except Exception, ex:
print "Failed to remove path '%s':" % path, ex
class MountTask(Task.LoggingTask):
def __init__(self, job, hdd):
Task.LoggingTask.__init__(self, job, _("Mount"))
self.hdd = hdd
def prepare(self):
try:
dev = self.hdd.disk_path.split('/')[-1]
os.unlink('/dev/nomount.%s' % dev)
except Exception, e:
print "ERROR: Failed to remove /dev/nomount file:", e
# try mounting through fstab first
if self.hdd.mount_device is None:
dev = self.hdd.partitionPath("1")
else:
# if previously mounted, use the same spot
dev = self.hdd.mount_device
fstab = open("/etc/fstab")
lines = fstab.readlines()
fstab.close()
for line in lines:
parts = line.strip().split(" ")
fspath = os.path.realpath(parts[0])
if os.path.realpath(fspath) == dev:
self.setCmdline("mount -t auto " + fspath)
self.postconditions.append(Task.ReturncodePostcondition())
return
# device is not in fstab
if self.hdd.type == DEVTYPE_UDEV:
# we can let udev do the job, re-read the partition table
# Sorry for the sleep 2 hack...
self.setCmdline('sleep 2; hdparm -z ' + self.hdd.disk_path)
self.postconditions.append(Task.ReturncodePostcondition())
class MkfsTask(Task.LoggingTask):
def prepare(self):
self.fsck_state = None
def processOutput(self, data):
print "[Mkfs]", data
if 'Writing inode tables:' in data:
self.fsck_state = 'inode'
elif 'Creating journal' in data:
self.fsck_state = 'journal'
self.setProgress(80)
elif 'Writing superblocks ' in data:
self.setProgress(95)
elif self.fsck_state == 'inode':
if '/' in data:
try:
d = data.strip(' \x08\r\n').split('/',1)
if '\x08' in d[1]:
d[1] = d[1].split('\x08',1)[0]
self.setProgress(80*int(d[0])/int(d[1]))
except Exception, e:
print "[Mkfs] E:", e
return # don't log the progress
self.log.append(data)
harddiskmanager = HarddiskManager()
def isSleepStateDevice(device):
ret = os.popen("hdparm -C %s" % device).read()
if 'SG_IO' in ret or 'HDIO_DRIVE_CMD' in ret:
return None
if 'drive state is: standby' in ret or 'drive state is: idle' in ret:
return True
elif 'drive state is: active/idle' in ret:
return False
return None
def internalHDDNotSleeping(external=False):
state = False
if harddiskmanager.HDDCount():
for hdd in harddiskmanager.HDDList():
if hdd[1].internal or external:
if hdd[1].idle_running and hdd[1].max_idle_time and not hdd[1].isSleeping():
state = True
return state
SystemInfo["ext4"] = isFileSystemSupported("ext4")
| gpl-2.0 | -7,066,420,760,872,267,000 | 28.726154 | 155 | 0.660042 | false |
bluesea/zulip | tools/deprecated/finbot/monthdelta.py | 115 | 6015 | """monthdelta
Date calculation with months: MonthDelta class and monthmod() function.
"""
__all__ = ['MonthDelta', 'monthmod']
from datetime import date, timedelta
class MonthDelta:
"""Number of months offset from a date or datetime.
MonthDeltas allow date calculation without regard to the different lengths
of different months. A MonthDelta value added to a date produces another
date that has the same day-of-the-month, regardless of the lengths of the
intervening months. If the resulting date is in too short a month, the
last day in that month will result:
date(2008,1,30) + MonthDelta(1) -> date(2008,2,29)
MonthDeltas may be added, subtracted, multiplied, and floor-divided
similarly to timedeltas. They may not be added to timedeltas directly, as
both classes are intended to be used directly with dates and datetimes.
Only ints may be passed to the constructor. MonthDeltas are immutable.
NOTE: in calculations involving the 29th, 30th, and 31st days of the
month, MonthDeltas are not necessarily invertible [i.e., the result above
would not imply that date(2008,2,29) - MonthDelta(1) -> date(2008,1,30)].
"""
__slots__ = ('__months',)
def __init__(self, months=1):
if not isinstance(months, int):
raise TypeError('months must be an integer')
self.__months = months
def months(self):
return self.__months
months = property(months)
def __repr__(self):
try:
return 'MonthDelta({0})'.format(self.__months)
except AttributeError:
return 'MonthDelta(' + str(self.__months) + ')'
def __str__(self):
return str(self.__months) + ' month' + ((abs(self.__months) != 1
and 's') or '')
def __hash__(self):
return hash(self.__months)
def __eq__(self, other):
if isinstance(other, MonthDelta):
return (self.__months == other.months)
return False
def __ne__(self, other):
if isinstance(other, MonthDelta):
return (self.__months != other.months)
return True
def __lt__(self, other):
if isinstance(other, MonthDelta):
return (self.__months < other.months)
return NotImplemented
def __le__(self, other):
if isinstance(other, MonthDelta):
return (self.__months <= other.months)
return NotImplemented
def __gt__(self, other):
if isinstance(other, MonthDelta):
return (self.__months > other.months)
return NotImplemented
def __ge__(self, other):
if isinstance(other, MonthDelta):
return (self.__months >= other.months)
return NotImplemented
def __add__(self, other):
if isinstance(other, MonthDelta):
return MonthDelta(self.__months + other.months)
if isinstance(other, date):
day = other.day
# subtract one because months are not zero-based
month = other.month + self.__months - 1
year = other.year + month // 12
# now add it back
month = month % 12 + 1
if month == 2:
if day >= 29 and not year%4 and (year%100 or not year%400):
day = 29
elif day > 28:
day = 28
elif month in (4,6,9,11) and day > 30:
day = 30
try:
return other.replace(year, month, day)
except ValueError:
raise OverflowError('date value out of range')
return NotImplemented
def __sub__(self, other):
if isinstance(other, MonthDelta):
return MonthDelta(self.__months - other.months)
return NotImplemented
def __mul__(self, other):
if isinstance(other, int):
return MonthDelta(self.__months * other)
return NotImplemented
def __floordiv__(self, other):
# MonthDelta // MonthDelta -> int
if isinstance(other, MonthDelta):
return self.__months // other.months
if isinstance(other, int):
return MonthDelta(self.__months // other)
return NotImplemented
def __radd__(self, other):
return self + other
def __rsub__(self, other):
return -self + other
def __rmul__(self, other):
return self * other
def __ifloordiv__(self, other):
# in-place division by a MonthDelta (which will change the variable's
# type) is almost certainly a bug -- raising this error is the reason
# we don't just fall back on __floordiv__
if isinstance(other, MonthDelta):
raise TypeError('in-place division of a MonthDelta requires an '
'integer divisor')
if isinstance(other, int):
return MonthDelta(self.__months // other)
return NotImplemented
def __neg__(self):
return MonthDelta(-self.__months)
def __pos__(self):
return MonthDelta(+self.__months)
def __abs__(self):
return MonthDelta(abs(self.__months))
def __bool__(self):
return bool(self.__months)
__nonzero__ = __bool__
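def _monthdelta_example():
    """Illustrative sketch, not part of the original module: demonstrates the
    end-of-month clamping described in the MonthDelta docstring."""
    assert date(2008, 1, 30) + MonthDelta(1) == date(2008, 2, 29)
    assert date(2008, 1, 15) - MonthDelta(2) == date(2007, 11, 15)
    return MonthDelta(3) * 4  # MonthDelta(12)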
def monthmod(start, end):
"""Months between dates, plus leftover time.
Distribute the interim between start and end dates into MonthDelta and
timedelta portions. If and only if start is after end, returned MonthDelta
will be negative. Returned timedelta is always non-negative, and is always
smaller than the month in which the end date occurs.
Invariant: dt + monthmod(dt, dt+td)[0] + monthmod(dt, dt+td)[1] = dt + td
"""
if not (isinstance(start, date) and isinstance(end, date)):
raise TypeError('start and end must be dates')
md = MonthDelta(12*(end.year - start.year) + end.month - start.month -
int(start.day > end.day))
# will overflow (underflow?) for end near date.min
return md, end - (start + md)
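def _monthmod_example():
    """Illustrative sketch, not part of the original module: checks the
    documented invariant dt + md + rem == dt + td."""
    dt, td = date(2008, 1, 30), timedelta(days=32)
    md, rem = monthmod(dt, dt + td)
    assert dt + md + rem == dt + td
    return md, rem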
| apache-2.0 | -8,215,349,254,980,246,000 | 38.834437 | 78 | 0.594846 | false |
Niknakflak/-tg-station | tools/midi2piano/pyperclip/windows.py | 110 | 5405 | """
This module implements clipboard handling on Windows using ctypes.
"""
import time
import contextlib
import ctypes
from ctypes import c_size_t, sizeof, c_wchar_p, get_errno, c_wchar
from .exceptions import PyperclipWindowsException
class CheckedCall(object):
def __init__(self, f):
super(CheckedCall, self).__setattr__("f", f)
def __call__(self, *args):
ret = self.f(*args)
if not ret and get_errno():
raise PyperclipWindowsException("Error calling " + self.f.__name__)
return ret
def __setattr__(self, key, value):
setattr(self.f, key, value)
def init_windows_clipboard():
from ctypes.wintypes import (HGLOBAL, LPVOID, DWORD, LPCSTR, INT, HWND,
HINSTANCE, HMENU, BOOL, UINT, HANDLE)
windll = ctypes.windll
safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT,
INT, INT, HWND, HMENU, HINSTANCE, LPVOID]
safeCreateWindowExA.restype = HWND
safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
safeDestroyWindow.argtypes = [HWND]
safeDestroyWindow.restype = BOOL
OpenClipboard = windll.user32.OpenClipboard
OpenClipboard.argtypes = [HWND]
OpenClipboard.restype = BOOL
safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
safeCloseClipboard.argtypes = []
safeCloseClipboard.restype = BOOL
safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
safeEmptyClipboard.argtypes = []
safeEmptyClipboard.restype = BOOL
safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
safeGetClipboardData.argtypes = [UINT]
safeGetClipboardData.restype = HANDLE
safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
safeSetClipboardData.argtypes = [UINT, HANDLE]
safeSetClipboardData.restype = HANDLE
safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
safeGlobalAlloc.argtypes = [UINT, c_size_t]
safeGlobalAlloc.restype = HGLOBAL
safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
safeGlobalLock.argtypes = [HGLOBAL]
safeGlobalLock.restype = LPVOID
safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
safeGlobalUnlock.argtypes = [HGLOBAL]
safeGlobalUnlock.restype = BOOL
GMEM_MOVEABLE = 0x0002
CF_UNICODETEXT = 13
@contextlib.contextmanager
def window():
"""
Context that provides a valid Windows hwnd.
"""
# we really just need the hwnd, so setting "STATIC"
# as predefined lpClass is just fine.
hwnd = safeCreateWindowExA(0, b"STATIC", None, 0, 0, 0, 0, 0,
None, None, None, None)
try:
yield hwnd
finally:
safeDestroyWindow(hwnd)
@contextlib.contextmanager
def clipboard(hwnd):
"""
Context manager that opens the clipboard and prevents
other applications from modifying the clipboard content.
"""
# We may not get the clipboard handle immediately because
# some other application is accessing it (?)
# We try for at least 500ms to get the clipboard.
t = time.time() + 0.5
success = False
while time.time() < t:
success = OpenClipboard(hwnd)
if success:
break
time.sleep(0.01)
if not success:
raise PyperclipWindowsException("Error calling OpenClipboard")
try:
yield
finally:
safeCloseClipboard()
def copy_windows(text):
# This function is heavily based on
# http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
with window() as hwnd:
# http://msdn.com/ms649048
# If an application calls OpenClipboard with hwnd set to NULL,
# EmptyClipboard sets the clipboard owner to NULL;
# this causes SetClipboardData to fail.
# => We need a valid hwnd to copy something.
with clipboard(hwnd):
safeEmptyClipboard()
if text:
# http://msdn.com/ms649051
# If the hMem parameter identifies a memory object,
# the object must have been allocated using the
# function with the GMEM_MOVEABLE flag.
count = len(text) + 1
handle = safeGlobalAlloc(GMEM_MOVEABLE,
count * sizeof(c_wchar))
locked_handle = safeGlobalLock(handle)
ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar))
safeGlobalUnlock(handle)
safeSetClipboardData(CF_UNICODETEXT, handle)
def paste_windows():
with clipboard(None):
handle = safeGetClipboardData(CF_UNICODETEXT)
if not handle:
# GetClipboardData may return NULL with errno == NO_ERROR
# if the clipboard is empty.
# (Also, it may return a handle to an empty buffer,
# but technically that's not empty)
return ""
return c_wchar_p(handle).value
return copy_windows, paste_windows
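# Minimal usage sketch (illustrative, not part of the original module):
#
#     copy_windows, paste_windows = init_windows_clipboard()
#     copy_windows(u"hello from pyperclip")
#     assert paste_windows() == u"hello from pyperclip"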
| agpl-3.0 | -7,541,857,786,765,652,000 | 34.794702 | 102 | 0.619426 | false |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/cryptography/hazmat/backends/openssl/encode_asn1.py | 3 | 23428 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import calendar
import ipaddress
import six
from cryptography import utils, x509
from cryptography.hazmat.backends.openssl.decode_asn1 import (
_CRL_ENTRY_REASON_ENUM_TO_CODE, _DISTPOINT_TYPE_FULLNAME,
_DISTPOINT_TYPE_RELATIVENAME
)
from cryptography.x509.name import _ASN1Type
from cryptography.x509.oid import (
CRLEntryExtensionOID, ExtensionOID, OCSPExtensionOID,
)
def _encode_asn1_int(backend, x):
"""
Converts a python integer to an ASN1_INTEGER. The returned ASN1_INTEGER
will not be garbage collected (to support adding them to structs that take
ownership of the object). Be sure to register it for GC if it will be
discarded after use.
"""
# Convert Python integer to OpenSSL "bignum" in case value exceeds
# machine's native integer limits (note: `int_to_bn` doesn't automatically
# GC).
i = backend._int_to_bn(x)
i = backend._ffi.gc(i, backend._lib.BN_free)
# Wrap in an ASN.1 integer. Don't GC -- as documented.
i = backend._lib.BN_to_ASN1_INTEGER(i, backend._ffi.NULL)
backend.openssl_assert(i != backend._ffi.NULL)
return i
def _encode_asn1_int_gc(backend, x):
i = _encode_asn1_int(backend, x)
i = backend._ffi.gc(i, backend._lib.ASN1_INTEGER_free)
return i
def _encode_asn1_str(backend, data):
"""
Create an ASN1_OCTET_STRING from a Python byte string.
"""
s = backend._lib.ASN1_OCTET_STRING_new()
res = backend._lib.ASN1_OCTET_STRING_set(s, data, len(data))
backend.openssl_assert(res == 1)
return s
def _encode_asn1_utf8_str(backend, string):
"""
Create an ASN1_UTF8STRING from a Python unicode string.
This object will be an ASN1_STRING with UTF8 type in OpenSSL and
can be decoded with ASN1_STRING_to_UTF8.
"""
s = backend._lib.ASN1_UTF8STRING_new()
res = backend._lib.ASN1_STRING_set(
s, string.encode("utf8"), len(string.encode("utf8"))
)
backend.openssl_assert(res == 1)
return s
def _encode_asn1_str_gc(backend, data):
s = _encode_asn1_str(backend, data)
s = backend._ffi.gc(s, backend._lib.ASN1_OCTET_STRING_free)
return s
def _encode_inhibit_any_policy(backend, inhibit_any_policy):
return _encode_asn1_int_gc(backend, inhibit_any_policy.skip_certs)
def _encode_name(backend, name):
"""
The X509_NAME created will not be gc'd. Use _encode_name_gc if needed.
"""
subject = backend._lib.X509_NAME_new()
for rdn in name.rdns:
set_flag = 0 # indicate whether to add to last RDN or create new RDN
for attribute in rdn:
name_entry = _encode_name_entry(backend, attribute)
# X509_NAME_add_entry dups the object so we need to gc this copy
name_entry = backend._ffi.gc(
name_entry, backend._lib.X509_NAME_ENTRY_free
)
res = backend._lib.X509_NAME_add_entry(
subject, name_entry, -1, set_flag)
backend.openssl_assert(res == 1)
set_flag = -1
return subject
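# Illustrative note (comment added for clarity, not in the original source):
# with the set_flag handling above, a Name containing
#   x509.RelativeDistinguishedName([common_name_attr, org_unit_attr])
# is encoded as a single multi-valued RDN, because every attribute after the
# first is added with set_flag == -1 (append to the previous RDN) rather
# than 0 (start a new RDN).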
def _encode_name_gc(backend, attributes):
subject = _encode_name(backend, attributes)
subject = backend._ffi.gc(subject, backend._lib.X509_NAME_free)
return subject
def _encode_sk_name_entry(backend, attributes):
"""
The sk_X509_NAME_ENTRY created will not be gc'd.
"""
stack = backend._lib.sk_X509_NAME_ENTRY_new_null()
for attribute in attributes:
name_entry = _encode_name_entry(backend, attribute)
res = backend._lib.sk_X509_NAME_ENTRY_push(stack, name_entry)
backend.openssl_assert(res >= 1)
return stack
def _encode_name_entry(backend, attribute):
if attribute._type is _ASN1Type.BMPString:
value = attribute.value.encode('utf_16_be')
else:
value = attribute.value.encode('utf8')
obj = _txt2obj_gc(backend, attribute.oid.dotted_string)
name_entry = backend._lib.X509_NAME_ENTRY_create_by_OBJ(
backend._ffi.NULL, obj, attribute._type.value, value, len(value)
)
return name_entry
def _encode_crl_number_delta_crl_indicator(backend, ext):
return _encode_asn1_int_gc(backend, ext.crl_number)
def _encode_issuing_dist_point(backend, ext):
idp = backend._lib.ISSUING_DIST_POINT_new()
backend.openssl_assert(idp != backend._ffi.NULL)
idp = backend._ffi.gc(idp, backend._lib.ISSUING_DIST_POINT_free)
idp.onlyuser = 255 if ext.only_contains_user_certs else 0
idp.onlyCA = 255 if ext.only_contains_ca_certs else 0
idp.indirectCRL = 255 if ext.indirect_crl else 0
idp.onlyattr = 255 if ext.only_contains_attribute_certs else 0
if ext.only_some_reasons:
idp.onlysomereasons = _encode_reasonflags(
backend, ext.only_some_reasons
)
if ext.full_name:
idp.distpoint = _encode_full_name(backend, ext.full_name)
if ext.relative_name:
idp.distpoint = _encode_relative_name(backend, ext.relative_name)
return idp
def _encode_crl_reason(backend, crl_reason):
asn1enum = backend._lib.ASN1_ENUMERATED_new()
backend.openssl_assert(asn1enum != backend._ffi.NULL)
asn1enum = backend._ffi.gc(asn1enum, backend._lib.ASN1_ENUMERATED_free)
res = backend._lib.ASN1_ENUMERATED_set(
asn1enum, _CRL_ENTRY_REASON_ENUM_TO_CODE[crl_reason.reason]
)
backend.openssl_assert(res == 1)
return asn1enum
def _encode_invalidity_date(backend, invalidity_date):
time = backend._lib.ASN1_GENERALIZEDTIME_set(
backend._ffi.NULL, calendar.timegm(
invalidity_date.invalidity_date.timetuple()
)
)
backend.openssl_assert(time != backend._ffi.NULL)
time = backend._ffi.gc(time, backend._lib.ASN1_GENERALIZEDTIME_free)
return time
def _encode_certificate_policies(backend, certificate_policies):
cp = backend._lib.sk_POLICYINFO_new_null()
backend.openssl_assert(cp != backend._ffi.NULL)
cp = backend._ffi.gc(cp, backend._lib.sk_POLICYINFO_free)
for policy_info in certificate_policies:
pi = backend._lib.POLICYINFO_new()
backend.openssl_assert(pi != backend._ffi.NULL)
res = backend._lib.sk_POLICYINFO_push(cp, pi)
backend.openssl_assert(res >= 1)
oid = _txt2obj(backend, policy_info.policy_identifier.dotted_string)
pi.policyid = oid
if policy_info.policy_qualifiers:
pqis = backend._lib.sk_POLICYQUALINFO_new_null()
backend.openssl_assert(pqis != backend._ffi.NULL)
for qualifier in policy_info.policy_qualifiers:
pqi = backend._lib.POLICYQUALINFO_new()
backend.openssl_assert(pqi != backend._ffi.NULL)
res = backend._lib.sk_POLICYQUALINFO_push(pqis, pqi)
backend.openssl_assert(res >= 1)
if isinstance(qualifier, six.text_type):
pqi.pqualid = _txt2obj(
backend, x509.OID_CPS_QUALIFIER.dotted_string
)
pqi.d.cpsuri = _encode_asn1_str(
backend,
qualifier.encode("ascii"),
)
else:
assert isinstance(qualifier, x509.UserNotice)
pqi.pqualid = _txt2obj(
backend, x509.OID_CPS_USER_NOTICE.dotted_string
)
un = backend._lib.USERNOTICE_new()
backend.openssl_assert(un != backend._ffi.NULL)
pqi.d.usernotice = un
if qualifier.explicit_text:
un.exptext = _encode_asn1_utf8_str(
backend, qualifier.explicit_text
)
un.noticeref = _encode_notice_reference(
backend, qualifier.notice_reference
)
pi.qualifiers = pqis
return cp
def _encode_notice_reference(backend, notice):
if notice is None:
return backend._ffi.NULL
else:
nr = backend._lib.NOTICEREF_new()
backend.openssl_assert(nr != backend._ffi.NULL)
# organization is a required field
nr.organization = _encode_asn1_utf8_str(backend, notice.organization)
notice_stack = backend._lib.sk_ASN1_INTEGER_new_null()
nr.noticenos = notice_stack
for number in notice.notice_numbers:
num = _encode_asn1_int(backend, number)
res = backend._lib.sk_ASN1_INTEGER_push(notice_stack, num)
backend.openssl_assert(res >= 1)
return nr
def _txt2obj(backend, name):
"""
Converts a Python string with an ASN.1 object ID in dotted form to a
ASN1_OBJECT.
"""
name = name.encode('ascii')
obj = backend._lib.OBJ_txt2obj(name, 1)
backend.openssl_assert(obj != backend._ffi.NULL)
return obj
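# Illustrative note (comment added for clarity, not in the original source):
# the input is a textual dotted OID, e.g. _txt2obj(backend, "2.5.4.3")
# returns the ASN1_OBJECT for id-at-commonName.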
def _txt2obj_gc(backend, name):
obj = _txt2obj(backend, name)
obj = backend._ffi.gc(obj, backend._lib.ASN1_OBJECT_free)
return obj
def _encode_ocsp_nocheck(backend, ext):
# Doesn't need to be GC'd
return backend._lib.ASN1_NULL_new()
def _encode_key_usage(backend, key_usage):
set_bit = backend._lib.ASN1_BIT_STRING_set_bit
ku = backend._lib.ASN1_BIT_STRING_new()
ku = backend._ffi.gc(ku, backend._lib.ASN1_BIT_STRING_free)
res = set_bit(ku, 0, key_usage.digital_signature)
backend.openssl_assert(res == 1)
res = set_bit(ku, 1, key_usage.content_commitment)
backend.openssl_assert(res == 1)
res = set_bit(ku, 2, key_usage.key_encipherment)
backend.openssl_assert(res == 1)
res = set_bit(ku, 3, key_usage.data_encipherment)
backend.openssl_assert(res == 1)
res = set_bit(ku, 4, key_usage.key_agreement)
backend.openssl_assert(res == 1)
res = set_bit(ku, 5, key_usage.key_cert_sign)
backend.openssl_assert(res == 1)
res = set_bit(ku, 6, key_usage.crl_sign)
backend.openssl_assert(res == 1)
if key_usage.key_agreement:
res = set_bit(ku, 7, key_usage.encipher_only)
backend.openssl_assert(res == 1)
res = set_bit(ku, 8, key_usage.decipher_only)
backend.openssl_assert(res == 1)
else:
res = set_bit(ku, 7, 0)
backend.openssl_assert(res == 1)
res = set_bit(ku, 8, 0)
backend.openssl_assert(res == 1)
return ku
def _encode_authority_key_identifier(backend, authority_keyid):
akid = backend._lib.AUTHORITY_KEYID_new()
backend.openssl_assert(akid != backend._ffi.NULL)
akid = backend._ffi.gc(akid, backend._lib.AUTHORITY_KEYID_free)
if authority_keyid.key_identifier is not None:
akid.keyid = _encode_asn1_str(
backend,
authority_keyid.key_identifier,
)
if authority_keyid.authority_cert_issuer is not None:
akid.issuer = _encode_general_names(
backend, authority_keyid.authority_cert_issuer
)
if authority_keyid.authority_cert_serial_number is not None:
akid.serial = _encode_asn1_int(
backend, authority_keyid.authority_cert_serial_number
)
return akid
def _encode_basic_constraints(backend, basic_constraints):
constraints = backend._lib.BASIC_CONSTRAINTS_new()
constraints = backend._ffi.gc(
constraints, backend._lib.BASIC_CONSTRAINTS_free
)
constraints.ca = 255 if basic_constraints.ca else 0
if basic_constraints.ca and basic_constraints.path_length is not None:
constraints.pathlen = _encode_asn1_int(
backend, basic_constraints.path_length
)
return constraints
def _encode_authority_information_access(backend, authority_info_access):
aia = backend._lib.sk_ACCESS_DESCRIPTION_new_null()
backend.openssl_assert(aia != backend._ffi.NULL)
aia = backend._ffi.gc(
aia,
lambda x: backend._lib.sk_ACCESS_DESCRIPTION_pop_free(
x, backend._ffi.addressof(
backend._lib._original_lib, "ACCESS_DESCRIPTION_free"
)
)
)
for access_description in authority_info_access:
ad = backend._lib.ACCESS_DESCRIPTION_new()
method = _txt2obj(
backend, access_description.access_method.dotted_string
)
_encode_general_name_preallocated(
backend, access_description.access_location, ad.location
)
ad.method = method
res = backend._lib.sk_ACCESS_DESCRIPTION_push(aia, ad)
backend.openssl_assert(res >= 1)
return aia
def _encode_general_names(backend, names):
general_names = backend._lib.GENERAL_NAMES_new()
backend.openssl_assert(general_names != backend._ffi.NULL)
for name in names:
gn = _encode_general_name(backend, name)
res = backend._lib.sk_GENERAL_NAME_push(general_names, gn)
backend.openssl_assert(res != 0)
return general_names
def _encode_alt_name(backend, san):
general_names = _encode_general_names(backend, san)
general_names = backend._ffi.gc(
general_names, backend._lib.GENERAL_NAMES_free
)
return general_names
def _encode_subject_key_identifier(backend, ski):
return _encode_asn1_str_gc(backend, ski.digest)
def _encode_general_name(backend, name):
gn = backend._lib.GENERAL_NAME_new()
_encode_general_name_preallocated(backend, name, gn)
return gn
def _encode_general_name_preallocated(backend, name, gn):
if isinstance(name, x509.DNSName):
backend.openssl_assert(gn != backend._ffi.NULL)
gn.type = backend._lib.GEN_DNS
ia5 = backend._lib.ASN1_IA5STRING_new()
backend.openssl_assert(ia5 != backend._ffi.NULL)
# ia5strings are supposed to be ITU T.50 but to allow round-tripping
# of broken certs that encode utf8 we'll encode utf8 here too.
value = name.value.encode("utf8")
res = backend._lib.ASN1_STRING_set(ia5, value, len(value))
backend.openssl_assert(res == 1)
gn.d.dNSName = ia5
elif isinstance(name, x509.RegisteredID):
backend.openssl_assert(gn != backend._ffi.NULL)
gn.type = backend._lib.GEN_RID
obj = backend._lib.OBJ_txt2obj(
name.value.dotted_string.encode('ascii'), 1
)
backend.openssl_assert(obj != backend._ffi.NULL)
gn.d.registeredID = obj
elif isinstance(name, x509.DirectoryName):
backend.openssl_assert(gn != backend._ffi.NULL)
dir_name = _encode_name(backend, name.value)
gn.type = backend._lib.GEN_DIRNAME
gn.d.directoryName = dir_name
elif isinstance(name, x509.IPAddress):
backend.openssl_assert(gn != backend._ffi.NULL)
if isinstance(name.value, ipaddress.IPv4Network):
packed = (
name.value.network_address.packed +
utils.int_to_bytes(((1 << 32) - name.value.num_addresses), 4)
)
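            # Worked example: for 192.0.2.0/24, num_addresses = 2**8 = 256,
            # so (1 << 32) - 256 = 0xffffff00 -- the netmask 255.255.255.0
            # appended after the four address bytes.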
elif isinstance(name.value, ipaddress.IPv6Network):
packed = (
name.value.network_address.packed +
utils.int_to_bytes((1 << 128) - name.value.num_addresses, 16)
)
else:
packed = name.value.packed
ipaddr = _encode_asn1_str(backend, packed)
gn.type = backend._lib.GEN_IPADD
gn.d.iPAddress = ipaddr
elif isinstance(name, x509.OtherName):
backend.openssl_assert(gn != backend._ffi.NULL)
other_name = backend._lib.OTHERNAME_new()
backend.openssl_assert(other_name != backend._ffi.NULL)
type_id = backend._lib.OBJ_txt2obj(
name.type_id.dotted_string.encode('ascii'), 1
)
backend.openssl_assert(type_id != backend._ffi.NULL)
data = backend._ffi.new("unsigned char[]", name.value)
data_ptr_ptr = backend._ffi.new("unsigned char **")
data_ptr_ptr[0] = data
value = backend._lib.d2i_ASN1_TYPE(
backend._ffi.NULL, data_ptr_ptr, len(name.value)
)
if value == backend._ffi.NULL:
backend._consume_errors()
raise ValueError("Invalid ASN.1 data")
other_name.type_id = type_id
other_name.value = value
gn.type = backend._lib.GEN_OTHERNAME
gn.d.otherName = other_name
elif isinstance(name, x509.RFC822Name):
backend.openssl_assert(gn != backend._ffi.NULL)
# ia5strings are supposed to be ITU T.50 but to allow round-tripping
# of broken certs that encode utf8 we'll encode utf8 here too.
data = name.value.encode("utf8")
asn1_str = _encode_asn1_str(backend, data)
gn.type = backend._lib.GEN_EMAIL
gn.d.rfc822Name = asn1_str
elif isinstance(name, x509.UniformResourceIdentifier):
backend.openssl_assert(gn != backend._ffi.NULL)
# ia5strings are supposed to be ITU T.50 but to allow round-tripping
# of broken certs that encode utf8 we'll encode utf8 here too.
data = name.value.encode("utf8")
asn1_str = _encode_asn1_str(backend, data)
gn.type = backend._lib.GEN_URI
gn.d.uniformResourceIdentifier = asn1_str
else:
raise ValueError(
"{} is an unknown GeneralName type".format(name)
)
def _encode_extended_key_usage(backend, extended_key_usage):
eku = backend._lib.sk_ASN1_OBJECT_new_null()
eku = backend._ffi.gc(eku, backend._lib.sk_ASN1_OBJECT_free)
for oid in extended_key_usage:
obj = _txt2obj(backend, oid.dotted_string)
res = backend._lib.sk_ASN1_OBJECT_push(eku, obj)
backend.openssl_assert(res >= 1)
return eku
_CRLREASONFLAGS = {
x509.ReasonFlags.key_compromise: 1,
x509.ReasonFlags.ca_compromise: 2,
x509.ReasonFlags.affiliation_changed: 3,
x509.ReasonFlags.superseded: 4,
x509.ReasonFlags.cessation_of_operation: 5,
x509.ReasonFlags.certificate_hold: 6,
x509.ReasonFlags.privilege_withdrawn: 7,
x509.ReasonFlags.aa_compromise: 8,
}
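# The indices above mirror RFC 5280's ReasonFlags BIT STRING, in which bit 0
# is "unused" -- which is why the mapping starts at 1.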
def _encode_reasonflags(backend, reasons):
bitmask = backend._lib.ASN1_BIT_STRING_new()
backend.openssl_assert(bitmask != backend._ffi.NULL)
for reason in reasons:
res = backend._lib.ASN1_BIT_STRING_set_bit(
bitmask, _CRLREASONFLAGS[reason], 1
)
backend.openssl_assert(res == 1)
return bitmask
def _encode_full_name(backend, full_name):
dpn = backend._lib.DIST_POINT_NAME_new()
backend.openssl_assert(dpn != backend._ffi.NULL)
dpn.type = _DISTPOINT_TYPE_FULLNAME
dpn.name.fullname = _encode_general_names(backend, full_name)
return dpn
def _encode_relative_name(backend, relative_name):
dpn = backend._lib.DIST_POINT_NAME_new()
backend.openssl_assert(dpn != backend._ffi.NULL)
dpn.type = _DISTPOINT_TYPE_RELATIVENAME
dpn.name.relativename = _encode_sk_name_entry(backend, relative_name)
return dpn
def _encode_cdps_freshest_crl(backend, cdps):
cdp = backend._lib.sk_DIST_POINT_new_null()
cdp = backend._ffi.gc(cdp, backend._lib.sk_DIST_POINT_free)
for point in cdps:
dp = backend._lib.DIST_POINT_new()
backend.openssl_assert(dp != backend._ffi.NULL)
if point.reasons:
dp.reasons = _encode_reasonflags(backend, point.reasons)
if point.full_name:
dp.distpoint = _encode_full_name(backend, point.full_name)
if point.relative_name:
dp.distpoint = _encode_relative_name(backend, point.relative_name)
if point.crl_issuer:
dp.CRLissuer = _encode_general_names(backend, point.crl_issuer)
res = backend._lib.sk_DIST_POINT_push(cdp, dp)
backend.openssl_assert(res >= 1)
return cdp
def _encode_name_constraints(backend, name_constraints):
nc = backend._lib.NAME_CONSTRAINTS_new()
backend.openssl_assert(nc != backend._ffi.NULL)
nc = backend._ffi.gc(nc, backend._lib.NAME_CONSTRAINTS_free)
permitted = _encode_general_subtree(
backend, name_constraints.permitted_subtrees
)
nc.permittedSubtrees = permitted
excluded = _encode_general_subtree(
backend, name_constraints.excluded_subtrees
)
nc.excludedSubtrees = excluded
return nc
def _encode_policy_constraints(backend, policy_constraints):
pc = backend._lib.POLICY_CONSTRAINTS_new()
backend.openssl_assert(pc != backend._ffi.NULL)
pc = backend._ffi.gc(pc, backend._lib.POLICY_CONSTRAINTS_free)
if policy_constraints.require_explicit_policy is not None:
pc.requireExplicitPolicy = _encode_asn1_int(
backend, policy_constraints.require_explicit_policy
)
if policy_constraints.inhibit_policy_mapping is not None:
pc.inhibitPolicyMapping = _encode_asn1_int(
backend, policy_constraints.inhibit_policy_mapping
)
return pc
def _encode_general_subtree(backend, subtrees):
if subtrees is None:
return backend._ffi.NULL
else:
general_subtrees = backend._lib.sk_GENERAL_SUBTREE_new_null()
for name in subtrees:
gs = backend._lib.GENERAL_SUBTREE_new()
gs.base = _encode_general_name(backend, name)
res = backend._lib.sk_GENERAL_SUBTREE_push(general_subtrees, gs)
            backend.openssl_assert(res >= 1)
return general_subtrees
def _encode_nonce(backend, nonce):
return _encode_asn1_str_gc(backend, nonce.nonce)
_EXTENSION_ENCODE_HANDLERS = {
ExtensionOID.BASIC_CONSTRAINTS: _encode_basic_constraints,
ExtensionOID.SUBJECT_KEY_IDENTIFIER: _encode_subject_key_identifier,
ExtensionOID.KEY_USAGE: _encode_key_usage,
ExtensionOID.SUBJECT_ALTERNATIVE_NAME: _encode_alt_name,
ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name,
ExtensionOID.EXTENDED_KEY_USAGE: _encode_extended_key_usage,
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier,
ExtensionOID.CERTIFICATE_POLICIES: _encode_certificate_policies,
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: (
_encode_authority_information_access
),
ExtensionOID.CRL_DISTRIBUTION_POINTS: _encode_cdps_freshest_crl,
ExtensionOID.FRESHEST_CRL: _encode_cdps_freshest_crl,
ExtensionOID.INHIBIT_ANY_POLICY: _encode_inhibit_any_policy,
ExtensionOID.OCSP_NO_CHECK: _encode_ocsp_nocheck,
ExtensionOID.NAME_CONSTRAINTS: _encode_name_constraints,
ExtensionOID.POLICY_CONSTRAINTS: _encode_policy_constraints,
}
_CRL_EXTENSION_ENCODE_HANDLERS = {
ExtensionOID.ISSUER_ALTERNATIVE_NAME: _encode_alt_name,
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: _encode_authority_key_identifier,
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: (
_encode_authority_information_access
),
ExtensionOID.CRL_NUMBER: _encode_crl_number_delta_crl_indicator,
ExtensionOID.DELTA_CRL_INDICATOR: _encode_crl_number_delta_crl_indicator,
ExtensionOID.ISSUING_DISTRIBUTION_POINT: _encode_issuing_dist_point,
}
_CRL_ENTRY_EXTENSION_ENCODE_HANDLERS = {
CRLEntryExtensionOID.CERTIFICATE_ISSUER: _encode_alt_name,
CRLEntryExtensionOID.CRL_REASON: _encode_crl_reason,
CRLEntryExtensionOID.INVALIDITY_DATE: _encode_invalidity_date,
}
_OCSP_REQUEST_EXTENSION_ENCODE_HANDLERS = {
OCSPExtensionOID.NONCE: _encode_nonce,
}
_OCSP_BASICRESP_EXTENSION_ENCODE_HANDLERS = {
OCSPExtensionOID.NONCE: _encode_nonce,
}
| apache-2.0 | -2,780,456,879,164,116,000 | 34.659056 | 79 | 0.648071 | false |
mafiya69/sympy | sympy/physics/quantum/matrixutils.py | 87 | 10287 | """Utilities to deal with sympy.Matrix, numpy and scipy.sparse."""
from __future__ import print_function, division
from sympy import Matrix, I, Expr, Integer
from sympy.core.compatibility import range
from sympy.matrices import eye, zeros
from sympy.external import import_module
__all__ = [
'numpy_ndarray',
'scipy_sparse_matrix',
'sympy_to_numpy',
'sympy_to_scipy_sparse',
'numpy_to_sympy',
'scipy_sparse_to_sympy',
'flatten_scalar',
'matrix_dagger',
'to_sympy',
'to_numpy',
'to_scipy_sparse',
'matrix_tensor_product',
'matrix_zeros'
]
# Conditionally define the base classes for numpy and scipy.sparse arrays
# for use in isinstance tests.
np = import_module('numpy')
if not np:
class numpy_ndarray(object):
pass
else:
numpy_ndarray = np.ndarray
scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']})
if not scipy:
class scipy_sparse_matrix(object):
pass
sparse = None
else:
sparse = scipy.sparse
# Try to find spmatrix.
if hasattr(sparse, 'base'):
# Newer versions have it under scipy.sparse.base.
scipy_sparse_matrix = sparse.base.spmatrix
elif hasattr(sparse, 'sparse'):
# Older versions have it under scipy.sparse.sparse.
scipy_sparse_matrix = sparse.sparse.spmatrix
def sympy_to_numpy(m, **options):
"""Convert a sympy Matrix/complex number to a numpy matrix or scalar."""
if not np:
raise ImportError
dtype = options.get('dtype', 'complex')
if isinstance(m, Matrix):
return np.matrix(m.tolist(), dtype=dtype)
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected Matrix or complex scalar, got: %r' % m)
def sympy_to_scipy_sparse(m, **options):
"""Convert a sympy Matrix/complex number to a numpy matrix or scalar."""
if not np or not sparse:
raise ImportError
dtype = options.get('dtype', 'complex')
if isinstance(m, Matrix):
return sparse.csr_matrix(np.matrix(m.tolist(), dtype=dtype))
elif isinstance(m, Expr):
if m.is_Number or m.is_NumberSymbol or m == I:
return complex(m)
raise TypeError('Expected Matrix or complex scalar, got: %r' % m)
def scipy_sparse_to_sympy(m, **options):
"""Convert a scipy.sparse matrix to a sympy matrix."""
return Matrix(m.todense())
def numpy_to_sympy(m, **options):
"""Convert a numpy matrix to a sympy matrix."""
return Matrix(m)
def to_sympy(m, **options):
"""Convert a numpy/scipy.sparse matrix to a sympy matrix."""
if isinstance(m, Matrix):
return m
elif isinstance(m, numpy_ndarray):
return numpy_to_sympy(m)
elif isinstance(m, scipy_sparse_matrix):
return scipy_sparse_to_sympy(m)
elif isinstance(m, Expr):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_numpy(m, **options):
"""Convert a sympy/scipy.sparse matrix to a numpy matrix."""
dtype = options.get('dtype', 'complex')
if isinstance(m, (Matrix, Expr)):
return sympy_to_numpy(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
return m
elif isinstance(m, scipy_sparse_matrix):
return m.todense()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def to_scipy_sparse(m, **options):
"""Convert a sympy/numpy matrix to a scipy.sparse matrix."""
dtype = options.get('dtype', 'complex')
if isinstance(m, (Matrix, Expr)):
return sympy_to_scipy_sparse(m, dtype=dtype)
elif isinstance(m, numpy_ndarray):
if not sparse:
raise ImportError
return sparse.csr_matrix(m)
elif isinstance(m, scipy_sparse_matrix):
return m
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % m)
def flatten_scalar(e):
"""Flatten a 1x1 matrix to a scalar, return larger matrices unchanged."""
if isinstance(e, Matrix):
if e.shape == (1, 1):
e = e[0]
if isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
if e.shape == (1, 1):
e = complex(e[0, 0])
return e
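# e.g. flatten_scalar(Matrix([[5]])) returns the scalar 5, while any larger
# matrix is returned unchanged.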
def matrix_dagger(e):
"""Return the dagger of a sympy/numpy/scipy.sparse matrix."""
if isinstance(e, Matrix):
return e.H
elif isinstance(e, (numpy_ndarray, scipy_sparse_matrix)):
return e.conjugate().transpose()
raise TypeError('Expected sympy/numpy/scipy.sparse matrix, got: %r' % e)
# TODO: Move this into sympy.matrices.
def _sympy_tensor_product(*matrices):
"""Compute the tensor product of a sequence of sympy Matrices.
This is the standard Kronecker product of matrices [1].
Parameters
==========
matrices : tuple of Matrix instances
The matrices to take the tensor product of.
Returns
=======
matrix : Matrix
The tensor product matrix.
Examples
========
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum.matrixutils import _sympy_tensor_product
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> _sympy_tensor_product(m1, m2)
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2],
[3, 0, 4, 0],
[0, 3, 0, 4]])
>>> _sympy_tensor_product(m2, m1)
Matrix([
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 1, 2],
[0, 0, 3, 4]])
References
==========
[1] http://en.wikipedia.org/wiki/Kronecker_product
"""
# Make sure we have a sequence of Matrices
if not all(isinstance(m, Matrix) for m in matrices):
raise TypeError(
'Sequence of Matrices expected, got: %s' % repr(matrices)
)
# Pull out the first element in the product.
matrix_expansion = matrices[-1]
# Do the tensor product working from right to left.
for mat in reversed(matrices[:-1]):
rows = mat.rows
cols = mat.cols
        # Go through each row, appending the tensor product to the
        # running matrix_expansion.
for i in range(rows):
start = matrix_expansion*mat[i*cols]
# Go through each column joining each item
for j in range(cols - 1):
start = start.row_join(
matrix_expansion*mat[i*cols + j + 1]
)
# If this is the first element, make it the start of the
# new row.
if i == 0:
next = start
else:
next = next.col_join(start)
matrix_expansion = next
return matrix_expansion
def _numpy_tensor_product(*product):
"""numpy version of tensor product of multiple arguments."""
if not np:
raise ImportError
answer = product[0]
for item in product[1:]:
answer = np.kron(answer, item)
return answer
def _scipy_sparse_tensor_product(*product):
"""scipy.sparse version of tensor product of multiple arguments."""
if not sparse:
raise ImportError
answer = product[0]
for item in product[1:]:
answer = sparse.kron(answer, item)
# The final matrices will just be multiplied, so csr is a good final
# sparse format.
return sparse.csr_matrix(answer)
def matrix_tensor_product(*product):
"""Compute the matrix tensor product of sympy/numpy/scipy.sparse matrices."""
if isinstance(product[0], Matrix):
return _sympy_tensor_product(*product)
elif isinstance(product[0], numpy_ndarray):
return _numpy_tensor_product(*product)
elif isinstance(product[0], scipy_sparse_matrix):
return _scipy_sparse_tensor_product(*product)
def _numpy_eye(n):
"""numpy version of complex eye."""
if not np:
raise ImportError
return np.matrix(np.eye(n, dtype='complex'))
def _scipy_sparse_eye(n):
"""scipy.sparse version of complex eye."""
if not sparse:
raise ImportError
return sparse.eye(n, n, dtype='complex')
def matrix_eye(n, **options):
"""Get the version of eye and tensor_product for a given format."""
format = options.get('format', 'sympy')
if format == 'sympy':
return eye(n)
elif format == 'numpy':
return _numpy_eye(n)
elif format == 'scipy.sparse':
return _scipy_sparse_eye(n)
raise NotImplementedError('Invalid format: %r' % format)
def _numpy_zeros(m, n, **options):
"""numpy verson of zeros."""
dtype = options.get('dtype', 'float64')
if not np:
raise ImportError
return np.zeros((m, n), dtype=dtype)
def _scipy_sparse_zeros(m, n, **options):
"""scipy.sparse verson of zeros."""
spmatrix = options.get('spmatrix', 'csr')
dtype = options.get('dtype', 'float64')
if not sparse:
raise ImportError
if spmatrix == 'lil':
return sparse.lil_matrix((m, n), dtype=dtype)
elif spmatrix == 'csr':
return sparse.csr_matrix((m, n), dtype=dtype)
def matrix_zeros(m, n, **options):
""""Get a zeros matrix for a given format."""
format = options.get('format', 'sympy')
dtype = options.get('dtype', 'float64')
spmatrix = options.get('spmatrix', 'csr')
if format == 'sympy':
return zeros(m, n)
elif format == 'numpy':
return _numpy_zeros(m, n, **options)
elif format == 'scipy.sparse':
return _scipy_sparse_zeros(m, n, **options)
    raise NotImplementedError('Invalid format: %r' % format)
def _numpy_matrix_to_zero(e):
"""Convert a numpy zero matrix to the zero scalar."""
if not np:
raise ImportError
test = np.zeros_like(e)
if np.allclose(e, test):
return 0.0
else:
return e
def _scipy_sparse_matrix_to_zero(e):
"""Convert a scipy.sparse zero matrix to the zero scalar."""
if not np:
raise ImportError
edense = e.todense()
test = np.zeros_like(edense)
if np.allclose(edense, test):
return 0.0
else:
return e
def matrix_to_zero(e):
"""Convert a zero matrix to the scalar zero."""
if isinstance(e, Matrix):
if zeros(*e.shape) == e:
e = Integer(0)
elif isinstance(e, numpy_ndarray):
e = _numpy_matrix_to_zero(e)
elif isinstance(e, scipy_sparse_matrix):
e = _scipy_sparse_matrix_to_zero(e)
return e
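if __name__ == '__main__':
    # Minimal smoke test -- an illustrative addition, not part of the
    # original module. It only exercises the sympy code path, so it runs
    # even when numpy/scipy are unavailable.
    m1 = Matrix([[1, 2], [3, 4]])
    print(matrix_tensor_product(m1, matrix_eye(2)))  # 4x4 Kronecker product
    print(flatten_scalar(Matrix([[7]])))             # -> 7
    print(matrix_to_zero(zeros(2, 2)))               # -> 0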
| bsd-3-clause | 4,157,450,685,658,916,400 | 28.731214 | 81 | 0.61427 | false |
moondrop-entertainment/django-nonrel-drawp | tests/regressiontests/test_utils/tests.py | 49 | 4833 | import sys
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from models import Person
if sys.version_info >= (2, 5):
from tests_25 import AssertNumQueriesContextManagerTests
class SkippingTestCase(TestCase):
def test_skip_unless_db_feature(self):
"A test that might be skipped is actually called."
# Total hack, but it works, just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
self.assertRaises(ValueError, test_func)
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
def test_func():
raise ValueError
self.assertRaises(ValueError,
self.assertNumQueries, 2, test_func
)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name='test')
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1,
self.client.get,
"/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
class SaveRestoreWarningState(TestCase):
def test_save_restore_warnings_state(self):
"""
Ensure save_warnings_state/restore_warnings_state work correctly.
"""
# In reality this test could be satisfied by many broken implementations
# of save_warnings_state/restore_warnings_state (e.g. just
# warnings.resetwarnings()) , but it is difficult to test more.
import warnings
self.save_warnings_state()
class MyWarning(Warning):
pass
# Add a filter that causes an exception to be thrown, so we can catch it
warnings.simplefilter("error", MyWarning)
self.assertRaises(Warning, lambda: warnings.warn("warn", MyWarning))
# Now restore.
self.restore_warnings_state()
# After restoring, we shouldn't get an exception. But we don't want a
# warning printed either, so we have to silence the warning.
warnings.simplefilter("ignore", MyWarning)
warnings.warn("warn", MyWarning)
# Remove the filter we just added.
self.restore_warnings_state()
__test__ = {"API_TEST": r"""
# Some checks of the doctest output normalizer.
# Standard doctests do fairly strict output matching, so the checks below rely on the normalizer.
>>> from django.utils import simplejson
>>> from django.utils.xmlutils import SimplerXMLGenerator
>>> from StringIO import StringIO
>>> def produce_long():
... return 42L
>>> def produce_int():
... return 42
>>> def produce_json():
... return simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2), 'whiz': 42}])
>>> def produce_xml():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startDocument()
... xml.startElement("foo", {"aaa" : "1.0", "bbb": "2.0"})
... xml.startElement("bar", {"ccc" : "3.0"})
... xml.characters("Hello")
... xml.endElement("bar")
... xml.startElement("whiz", {})
... xml.characters("Goodbye")
... xml.endElement("whiz")
... xml.endElement("foo")
... xml.endDocument()
... return stream.getvalue()
>>> def produce_xml_fragment():
... stream = StringIO()
... xml = SimplerXMLGenerator(stream, encoding='utf-8')
... xml.startElement("foo", {"aaa": "1.0", "bbb": "2.0"})
... xml.characters("Hello")
... xml.endElement("foo")
... xml.startElement("bar", {"ccc": "3.0", "ddd": "4.0"})
... xml.endElement("bar")
... return stream.getvalue()
# Long values are normalized and are comparable to normal integers ...
>>> produce_long()
42
# ... and vice versa
>>> produce_int()
42L
# JSON output is normalized for field order, so it doesn't matter
# which order json dictionary attributes are listed in output
>>> produce_json()
'["foo", {"bar": ["baz", null, 1.0, 2], "whiz": 42}]'
>>> produce_json()
'["foo", {"whiz": 42, "bar": ["baz", null, 1.0, 2]}]'
# XML output is normalized for attribute order, so it doesn't matter
# which order XML element attributes are listed in output
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo aaa="1.0" bbb="2.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml()
'<?xml version="1.0" encoding="UTF-8"?>\n<foo bbb="2.0" aaa="1.0"><bar ccc="3.0">Hello</bar><whiz>Goodbye</whiz></foo>'
>>> produce_xml_fragment()
'<foo aaa="1.0" bbb="2.0">Hello</foo><bar ccc="3.0" ddd="4.0"></bar>'
>>> produce_xml_fragment()
'<foo bbb="2.0" aaa="1.0">Hello</foo><bar ddd="4.0" ccc="3.0"></bar>'
"""}
| bsd-3-clause | -6,226,504,442,065,110,000 | 31.006623 | 119 | 0.614111 | false |
esakellari/my_root_for_test | interpreter/llvm/src/utils/lldbDataFormatters.py | 20 | 3286 | """
LLDB Formatters for LLVM data types.
Load into LLDB with 'command script import /path/to/lldbDataFormatters.py'
"""
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('type category define -e llvm -l c++')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVectorImpl<.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVector<.+,.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.ArrayRefSynthProvider '
'-x "^llvm::ArrayRef<.+>$"')
# Pretty printer for llvm::SmallVector/llvm::SmallVectorImpl
class SmallVectorSynthProvider:
def __init__(self, valobj, dict):
        self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
begin = self.begin.GetValueAsUnsigned(0)
end = self.end.GetValueAsUnsigned(0)
return (end - begin)/self.type_size
def get_child_index(self, name):
        try:
            return int(name.lstrip('[').rstrip(']'))
        except ValueError:
            return -1
def get_child_at_index(self, index):
# Do bounds checking.
if index < 0:
return None
if index >= self.num_children():
            return None
offset = index * self.type_size
return self.begin.CreateChildAtOffset('['+str(index)+']',
offset, self.data_type)
def update(self):
self.begin = self.valobj.GetChildMemberWithName('BeginX')
self.end = self.valobj.GetChildMemberWithName('EndX')
the_type = self.valobj.GetType()
# If this is a reference type we have to dereference it to get to the
# template parameter.
if the_type.IsReferenceType():
the_type = the_type.GetDereferencedType()
self.data_type = the_type.GetTemplateArgumentType(0)
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
class ArrayRefSynthProvider:
""" Provider for llvm::ArrayRef """
def __init__(self, valobj, dict):
        self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
return self.length
def get_child_index(self, name):
        try:
            return int(name.lstrip('[').rstrip(']'))
        except ValueError:
            return -1
def get_child_at_index(self, index):
if index < 0 or index >= self.num_children():
            return None
offset = index * self.type_size
return self.data.CreateChildAtOffset('[' + str(index) + ']',
offset, self.data_type)
def update(self):
self.data = self.valobj.GetChildMemberWithName('Data')
length_obj = self.valobj.GetChildMemberWithName('Length')
self.length = length_obj.GetValueAsUnsigned(0)
self.data_type = self.data.GetType().GetPointeeType()
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
| lgpl-2.1 | -4,810,880,401,290,550,000 | 36.340909 | 77 | 0.585514 | false |
NukeAOSP/external_chromium | chrome/common/extensions/docs/build/build.py | 65 | 8905 | #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Docbuilder for extension docs."""
import os
import os.path
import shutil
import sys
import time
import urllib
from subprocess import Popen, PIPE
from optparse import OptionParser
_script_path = os.path.realpath(__file__)
_build_dir = os.path.dirname(_script_path)
_base_dir = os.path.normpath(_build_dir + "/..")
_webkit_dir = _base_dir + "/../../../../third_party/WebKit"
_devtools_dir = _webkit_dir + "/Source/WebCore/inspector/front-end"
_static_dir = _base_dir + "/static"
_js_dir = _base_dir + "/js"
_template_dir = _base_dir + "/template"
_samples_dir = _base_dir + "/examples"
_extension_api_dir = os.path.normpath(_base_dir + "/../api")
_extension_api_json = _extension_api_dir + "/extension_api.json"
_devtools_api_json = _devtools_dir + "/ExtensionAPISchema.json"
_api_template_html = _template_dir + "/api_template.html"
_page_shell_html = _template_dir + "/page_shell.html"
_generator_html = _build_dir + "/generator.html"
_samples_json = _base_dir + "/samples.json"
_expected_output_preamble = "#BEGIN"
_expected_output_postamble = "#END"
# HACK! This is required because we can only depend on python 2.4 and
# the calling environment may not be set up to provide the right PYTHONPATH
sys.path.append(os.path.normpath(_base_dir +
"/../../../../third_party"))
import simplejson as json
from directory import Sample
from directory import ApiManifest
from directory import SamplesManifest
def RenderPages(names, dump_render_tree):
"""
Calls DumpRenderTree .../generator.html?<names> and writes the
results to .../docs/<name>.html
"""
if not names:
raise Exception("RenderPage called with empty names param")
generator_url = "file:" + urllib.pathname2url(_generator_html)
generator_url += "?" + ",".join(names)
# Start with a fresh copy of page shell for each file.
# Save the current contents so that we can look for changes later.
originals = {}
for name in names:
input_file = _base_dir + "/" + name + ".html"
if (os.path.isfile(input_file)):
originals[name] = open(input_file, 'rb').read()
os.remove(input_file)
else:
originals[name] = ""
shutil.copy(_page_shell_html, input_file)
# Run DumpRenderTree and capture result
dump_render_tree_timeout = 1000 * 60 * 5 # five minutes
p = Popen(
[dump_render_tree, "--test-shell",
"%s %s" % (generator_url, dump_render_tree_timeout)],
stdout=PIPE)
# The remaining output will be the content of the generated pages.
output = p.stdout.read()
# Parse out just the JSON part.
begin = output.find(_expected_output_preamble)
end = output.rfind(_expected_output_postamble)
if (begin < 0 or end < 0):
raise Exception("%s returned invalid output:\n\n%s" %
(dump_render_tree, output))
begin += len(_expected_output_preamble)
try:
output_parsed = json.loads(output[begin:end])
except ValueError, msg:
raise Exception("Could not parse DumpRenderTree output as JSON. Error: " +
msg + "\n\nOutput was:\n" + output)
changed_files = []
for name in names:
result = output_parsed[name].encode("utf8") + '\n'
    # Remove CRs that appear in the captured DumpRenderTree output.
result = result.replace('\r', '')
# Remove page_shell
input_file = _base_dir + "/" + name + ".html"
os.remove(input_file)
# Write output
open(input_file, 'wb').write(result)
if (originals[name] and result != originals[name]):
changed_files.append(input_file)
return changed_files
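# For reference, the generator page prints to stdout a JSON object mapping
# each requested page name to its rendered HTML, wrapped in the markers
# parsed above -- roughly (page name illustrative):
#   #BEGIN{"getting_started": "<!DOCTYPE html>..."}#END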
def FindDumpRenderTree():
  # This is hacky: it guesses the location of the DumpRenderTree executable.
chrome_dir = os.path.normpath(_base_dir + "/../../../")
src_dir = os.path.normpath(chrome_dir + "/../")
search_locations = []
if (sys.platform in ('cygwin', 'win32')):
home_dir = os.path.normpath(os.getenv("HOMEDRIVE") + os.getenv("HOMEPATH"))
search_locations.append(chrome_dir + "/Release/DumpRenderTree.exe")
search_locations.append(chrome_dir + "/Debug/DumpRenderTree.exe")
search_locations.append(home_dir + "/bin/DumpRenderTree/"
"DumpRenderTree.exe")
if (sys.platform in ('linux', 'linux2')):
search_locations.append(src_dir + "/sconsbuild/Release/DumpRenderTree")
search_locations.append(src_dir + "/out/Release/DumpRenderTree")
search_locations.append(src_dir + "/sconsbuild/Debug/DumpRenderTree")
search_locations.append(src_dir + "/out/Debug/DumpRenderTree")
search_locations.append(os.getenv("HOME") + "/bin/DumpRenderTree/"
"DumpRenderTree")
if (sys.platform == 'darwin'):
search_locations.append(src_dir +
"/xcodebuild/Release/DumpRenderTree.app/Contents/MacOS/DumpRenderTree")
search_locations.append(src_dir +
"/xcodebuild/Debug/DumpRenderTree.app/Contents/MacOS/DumpRenderTree")
search_locations.append(os.getenv("HOME") + "/bin/DumpRenderTree/" +
"DumpRenderTree.app/Contents/MacOS/DumpRenderTree")
for loc in search_locations:
if os.path.isfile(loc):
return loc
raise Exception("Could not find DumpRenderTree executable\n"
"**DumpRenderTree may need to be built**\n"
"Searched: \n" + "\n".join(search_locations) + "\n"
"To specify a path to DumpRenderTree use "
"--dump-render-tree-path")
def GetStaticFileNames():
static_files = os.listdir(_static_dir)
return set(os.path.splitext(file_name)[0]
for file_name in static_files
if file_name.endswith(".html") and not file_name.startswith("."))
def main():
# Prevent windows from using cygwin python.
if (sys.platform == "cygwin"):
sys.exit("Building docs not supported for cygwin python. Please run the "
"build.sh script instead, which uses depot_tools python.")
parser = OptionParser()
parser.add_option("--dump-render-tree-path", dest="dump_render_tree_path",
metavar="PATH",
help="path to DumpRenderTree executable")
parser.add_option("--page-name", dest="page_name", metavar="PAGE",
help="only generate docs for PAGE.html")
parser.add_option("--nozip", dest="zips", action="store_false",
help="do not generate zip files for samples",
default=True)
options, args = parser.parse_args()
if (options.dump_render_tree_path and
os.path.isfile(options.dump_render_tree_path)):
dump_render_tree = options.dump_render_tree_path
else:
dump_render_tree = FindDumpRenderTree()
# Load the manifest of existing API Methods
api_manifest = ApiManifest(_extension_api_json)
# DevTools API is maintained separately, in WebCore land
devtools_api_manifest = ApiManifest(_devtools_api_json)
# Read static file names
static_names = GetStaticFileNames()
# Read module names
module_names = (api_manifest.getModuleNames() |
devtools_api_manifest.getModuleNames())
# All pages to generate
page_names = static_names | module_names
# Allow the user to render a single page if they want
if options.page_name:
if options.page_name in page_names:
page_names = [options.page_name]
else:
raise Exception("--page-name argument must be one of %s." %
', '.join(sorted(page_names)))
# Render a manifest file containing metadata about all the extension samples
samples_manifest = SamplesManifest(_samples_dir, _base_dir, api_manifest)
samples_manifest.writeToFile(_samples_json)
# Write zipped versions of the samples listed in the manifest to the
# filesystem, unless the user has disabled it
if options.zips:
modified_zips = samples_manifest.writeZippedSamples()
else:
modified_zips = []
modified_files = RenderPages(page_names, dump_render_tree)
modified_files.extend(modified_zips)
if len(modified_files) == 0:
print "Output files match existing files. No changes made."
else:
print ("ATTENTION: EXTENSION DOCS HAVE CHANGED\n" +
"The following files have been modified and should be checked\n" +
"into source control (ideally in the same changelist as the\n" +
"underlying files that resulting in their changing).")
for f in modified_files:
print " * %s" % f
# Hack. Sleep here, otherwise windows doesn't properly close the debug.log
# and the os.remove will fail with a "Permission denied".
time.sleep(1)
debug_log = os.path.normpath(_build_dir + "/" + "debug.log")
if (os.path.isfile(debug_log)):
os.remove(debug_log)
if 'EX_OK' in dir(os):
return os.EX_OK
else:
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 1,454,790,565,341,433,300 | 34.907258 | 79 | 0.662774 | false |
yarothetimble/todo | todo/settings.py | 1 | 3215 | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3jjx0203l9=el9k%4x$jbw+y)q!+_l3=sd!l_d_a)mez1vb4uv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'tasks.apps.TasksConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'todo_db',
'USER': 'todo_usr',
'PASSWORD': 'todo_pw',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
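# The credentials above assume a local PostgreSQL role and database created
# beforehand, e.g. (illustrative psql statements):
#   CREATE USER todo_usr WITH PASSWORD 'todo_pw';
#   CREATE DATABASE todo_db OWNER todo_usr;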
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| mit | -4,724,965,881,914,911,000 | 24.72 | 91 | 0.676516 | false |
walchko/pygecko | retired/old_version/original/bin/mjpeg_server.py | 1 | 4736 | #!/usr/bin/env python
import cv2
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import time
import argparse
from opencvutils import Camera
import socket as Socket
# import errno
# threaded version
# http://stackoverflow.com/questions/12650238/processing-simultaneous-asynchronous-requests-with-python-basehttpserver
# not sure flask is any better:
# https://blog.miguelgrinberg.com/post/video-streaming-with-flask
#``mjpeg_server``
# * only handles one connection at a time ... make threaded?
# * sometimes the video stream is slow to load, but then it works fine
# * handle client disconnect (broken pipe - 32) better
def compress(orig, comp):
return float(orig) / float(comp)
class mjpgServer(BaseHTTPRequestHandler):
"""
A simple mjpeg server that either publishes images directly from a camera
or republishes images from another pygecko process.
"""
cam = None
cameratype = 'cv'
host = None
win = (640, 480)
def __del__(self):
if self.cam:
self.cam.close()
self.cam = None
print 'Exiting mjpgServer'
def setUpCamera(self):
"""
cv - camera number, usually 0
pi - set to True
"""
print 'window size:', self.win
if self.cameratype == 'pi':
self.cam = Camera('pi')
self.cam.init(win=self.win)
elif self.cameratype == 'cv':
self.cam = Camera('cv')
			# a numeric device index is expected here (the string 'cv' cannot be opened)
			self.cam.init(cameraNumber=0, win=self.win)
else:
raise Exception('Error, you must specify "cv" or "pi" for camera type')
time.sleep(3)
def do_GET(self):
print 'connection from:', self.address_string()
if self.path == '/mjpg':
print 'mjpg'
self.send_response(200)
self.send_header(
'Content-type',
'multipart/x-mixed-replace; boundary=--jpgboundary'
)
self.end_headers()
while True:
if self.cam:
# print 'grab image'
ret, img = self.cam.read()
else:
# print 'setupcamera()'
self.setUpCamera()
ret = False
# ret, img = self.getImage()
if not ret:
# print 'crap'
time.sleep(1)
continue
ret, jpg = cv2.imencode('.jpg', img)
# print 'Compression ratio: %d4.0:1'%(compress(img.size,jpg.size))
self.wfile.write("--jpgboundary")
self.send_header('Content-type', 'image/jpeg')
# self.send_header('Content-length',str(tmpFile.len))
self.send_header('Content-length', str(jpg.size))
self.end_headers()
self.wfile.write(jpg.tostring())
time.sleep(0.05)
elif self.path == '/':
ip = self.host[0]
port = self.host[1]
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s}:{1!s}</h1>'.format(ip, port))
self.wfile.write('<img src="http://{}:{}/mjpg"/>'.format(ip, port))
self.wfile.write('<p>{0!s}</p>'.format((self.version_string())))
self.wfile.write('</p></ul>')
self.wfile.write('<p>This only handles one connection at a time</p>')
self.wfile.write('</body></html>')
else:
print 'error', self.path
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s} not found</h1>'.format(self.path))
self.wfile.write('</body></html>')
def handleArgs():
parser = argparse.ArgumentParser(description='A simple mjpeg server Example: mjpeg-server -p 8080 --camera 4')
parser.add_argument('-p', '--port', help='local publisher port, default is 9000', type=int, default=9000)
# parser.add_argument('-c', '--camera', help='set opencv camera number, ex. -c 1', type=int, default=0)
parser.add_argument('-t', '--type', help='set type of camera: cv or pi, ex. -t pi', default='cv')
parser.add_argument('-s', '--size', help='set size', nargs=2, type=int, default=(640, 480))
# parser.add_argument('-r', '--remote', help='remote host image subscription info, hostname/ip port, ex: 1.2.3.4 9000', nargs=2, default=('0.0.0.0', 9000))
args = vars(parser.parse_args())
args['size'] = (args['size'][0], args['size'][1])
# args['remote'] = (args['remote'][0], args['remote'][1])
return args
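# Example invocation (illustrative):
#   ./mjpeg_server.py -p 8080 -t pi -s 320 240
# then browse to http://<host>:8080/ for the wrapper page, or /mjpg for the
# raw multipart JPEG stream.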
def main():
args = handleArgs()
# figure out host info
hostname = Socket.gethostname()
if hostname.find('.local') == -1:
hostname += '.local'
ip = Socket.gethostbyname(hostname)
hostinfo = (ip, args['port'])
try:
mjpgServer.topic = 'image_color'
mjpgServer.cameratype = 'pi'
mjpgServer.host = hostinfo
mjpgServer.win = args['size']
server = HTTPServer(hostinfo, mjpgServer)
print "server started on: {}:{}".format(ip, args['port'])
server.serve_forever()
except KeyboardInterrupt:
print 'KeyboardInterrupt'
server.socket.close()
exit(0)
if __name__ == '__main__':
main()
| mit | -4,184,404,571,273,389,600 | 27.878049 | 156 | 0.660895 | false |
fjbatresv/odoo | addons/membership/membership.py | 21 | 27956 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
STATE_PRIOR = {
'none': 0,
'canceled': 1,
'old': 2,
'waiting': 3,
'invoiced': 4,
'free': 6,
'paid': 7
}
class membership_line(osv.osv):
'''Member line'''
def _get_partners(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
if partner.member_lines:
list_membership_line += member_line_obj.search(cr, uid, [('id', 'in', [ l.id for l in partner.member_lines])], context=context)
return list_membership_line
def _get_membership_lines(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for invoice in self.pool.get('account.invoice').browse(cr, uid, ids, context=context):
if invoice.invoice_line:
list_membership_line += member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [ l.id for l in invoice.invoice_line])], context=context)
return list_membership_line
def _check_membership_date(self, cr, uid, ids, context=None):
"""Check if membership product is not in the past
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param context: A standard dictionary for contextual values
"""
cr.execute('''
SELECT MIN(ml.date_to - ai.date_invoice)
FROM membership_membership_line ml
JOIN account_invoice_line ail ON (
ml.account_invoice_line = ail.id
)
JOIN account_invoice ai ON (
ai.id = ail.invoice_id)
WHERE ml.id IN %s''', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[0] and r[0] < 0:
return False
return True
def _state(self, cr, uid, ids, name, args, context=None):
"""Compute the state lines
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
@param return: Dictionary of state Value
"""
res = {}
inv_obj = self.pool.get('account.invoice')
for line in self.browse(cr, uid, ids, context=context):
cr.execute('''
SELECT i.state, i.id FROM
account_invoice i
WHERE
i.id = (
SELECT l.invoice_id FROM
account_invoice_line l WHERE
l.id = (
SELECT ml.account_invoice_line FROM
membership_membership_line ml WHERE
ml.id = %s
)
)
''', (line.id,))
fetched = cr.fetchone()
if not fetched:
res[line.id] = 'canceled'
continue
istate = fetched[0]
state = 'none'
            if istate in ('draft', 'proforma'):
state = 'waiting'
elif istate == 'open':
state = 'invoiced'
elif istate == 'paid':
state = 'paid'
inv = inv_obj.browse(cr, uid, fetched[1], context=context)
for payment in inv.payment_ids:
if payment.invoice and payment.invoice.type == 'out_refund':
state = 'canceled'
elif istate == 'cancel':
state = 'canceled'
res[line.id] = state
return res
_description = __doc__
_name = 'membership.membership_line'
_columns = {
'partner': fields.many2one('res.partner', 'Partner', ondelete='cascade', select=1),
'membership_id': fields.many2one('product.product', string="Membership", required=True),
'date_from': fields.date('From', readonly=True),
'date_to': fields.date('To', readonly=True),
'date_cancel': fields.date('Cancel date'),
'date': fields.date('Join Date', help="Date on which member has joined the membership"),
'member_price': fields.float('Membership Fee', digits_compute= dp.get_precision('Product Price'), required=True, help='Amount for the membership'),
'account_invoice_line': fields.many2one('account.invoice.line', 'Account Invoice line', readonly=True),
'account_invoice_id': fields.related('account_invoice_line', 'invoice_id', type='many2one', relation='account.invoice', string='Invoice', readonly=True),
'state': fields.function(_state,
string='Membership Status', type='selection',
selection=STATE, store = {
'account.invoice': (_get_membership_lines, ['state'], 10),
'res.partner': (_get_partners, ['membership_state'], 12),
}, help="""It indicates the membership status.
-Non Member: A member who has not applied for any membership.
-Cancelled Member: A member who has cancelled his membership.
-Old Member: A member whose membership date has expired.
-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.
-Invoiced Member: A member whose invoice has been created.
-Paid Member: A member who has paid the membership amount."""),
'company_id': fields.related('account_invoice_line', 'invoice_id', 'company_id', type="many2one", relation="res.company", string="Company", readonly=True, store=True)
}
_rec_name = 'partner'
_order = 'id desc'
_constraints = [
(_check_membership_date, 'Error, this membership product is out of date', [])
]
class Partner(osv.osv):
'''Partner'''
_inherit = 'res.partner'
def _get_partner_id(self, cr, uid, ids, context=None):
member_line_obj = self.pool.get('membership.membership_line')
res_obj = self.pool.get('res.partner')
data_inv = member_line_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _get_invoice_partner(self, cr, uid, ids, context=None):
inv_obj = self.pool.get('account.invoice')
res_obj = self.pool.get('res.partner')
data_inv = inv_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner_id.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _cron_update_membership(self, cr, uid, context=None):
partner_ids = self.search(cr, uid, [('membership_state', '=', 'paid')], context=context)
if partner_ids:
self._store_set_values(cr, uid, partner_ids, ['membership_state'], context=context)
def _membership_state(self, cr, uid, ids, name, args, context=None):
"""This Function return Membership State For Given Partner.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Partner IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
@param return: Dictionary of Membership state Value
"""
res = {}
for id in ids:
res[id] = 'none'
today = time.strftime('%Y-%m-%d')
for id in ids:
partner_data = self.browse(cr, uid, id, context=context)
if partner_data.membership_cancel and today > partner_data.membership_cancel:
res[id] = 'free' if partner_data.free_member else 'canceled'
continue
if partner_data.membership_stop and today > partner_data.membership_stop:
res[id] = 'free' if partner_data.free_member else 'old'
continue
s = 4
if partner_data.member_lines:
for mline in partner_data.member_lines:
if mline.date_to >= today and mline.date_from < today:
if mline.account_invoice_line and mline.account_invoice_line.invoice_id:
mstate = mline.account_invoice_line.invoice_id.state
if mstate == 'paid':
s = 0
inv = mline.account_invoice_line.invoice_id
for payment in inv.payment_ids:
if payment.invoice.type == 'out_refund':
s = 2
break
elif mstate == 'open' and s!=0:
s = 1
elif mstate == 'cancel' and s!=0 and s!=1:
s = 2
elif (mstate == 'draft' or mstate == 'proforma') and s!=0 and s!=1:
s = 3
if s==4:
for mline in partner_data.member_lines:
if mline.date_from < today and mline.date_to < today and mline.date_from <= mline.date_to and (mline.account_invoice_line and mline.account_invoice_line.invoice_id.state) == 'paid':
s = 5
else:
s = 6
if s==0:
res[id] = 'paid'
elif s==1:
res[id] = 'invoiced'
elif s==2:
res[id] = 'canceled'
elif s==3:
res[id] = 'waiting'
elif s==5:
res[id] = 'old'
elif s==6:
res[id] = 'none'
if partner_data.free_member and s!=0:
res[id] = 'free'
if partner_data.associate_member:
res_state = self._membership_state(cr, uid, [partner_data.associate_member.id], name, args, context=context)
res[id] = res_state[partner_data.associate_member.id]
return res
def _membership_date(self, cr, uid, ids, name, args, context=None):
"""Return date of membership"""
name = name[0]
res = {}
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.browse(cr, uid, ids, context=context):
if partner.associate_member:
partner_id = partner.associate_member.id
else:
partner_id = partner.id
res[partner.id] = {
'membership_start': False,
'membership_stop': False,
'membership_cancel': False
}
if name == 'membership_start':
line_id = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_from', context=context)
if line_id:
res[partner.id]['membership_start'] = member_line_obj.read(cr, uid, [line_id[0]],
['date_from'], context=context)[0]['date_from']
if name == 'membership_stop':
line_id1 = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_to desc', context=context)
if line_id1:
res[partner.id]['membership_stop'] = member_line_obj.read(cr, uid, [line_id1[0]],
['date_to'], context=context)[0]['date_to']
if name == 'membership_cancel':
if partner.membership_state == 'canceled':
line_id2 = member_line_obj.search(cr, uid, [('partner', '=', partner.id)], limit=1, order='date_cancel', context=context)
if line_id2:
res[partner.id]['membership_cancel'] = member_line_obj.read(cr, uid, [line_id2[0]], ['date_cancel'], context=context)[0]['date_cancel']
return res
def _get_partners(self, cr, uid, ids, context=None):
ids2 = ids
while ids2:
ids2 = self.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
ids += ids2
return ids
def __get_membership_state(self, *args, **kwargs):
return self._membership_state(*args, **kwargs)
_columns = {
'associate_member': fields.many2one('res.partner', 'Associate Member',help="A member with whom you want to associate your membership.It will consider the membership state of the associated member."),
'member_lines': fields.one2many('membership.membership_line', 'partner', 'Membership'),
'free_member': fields.boolean('Free Member', help = "Select if you want to give free membership."),
'membership_amount': fields.float(
'Membership Amount', digits=(16, 2),
help = 'The price negotiated by the partner'),
'membership_state': fields.function(
__get_membership_state,
string = 'Current Membership Status', type = 'selection',
selection = STATE,
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help='It indicates the membership state.\n'
'-Non Member: A partner who has not applied for any membership.\n'
'-Cancelled Member: A member who has cancelled his membership.\n'
'-Old Member: A member whose membership date has expired.\n'
'-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.\n'
'-Invoiced Member: A member whose invoice has been created.\n'
'-Paying member: A member who has paid the membership fee.'),
'membership_start': fields.function(
_membership_date, multi = 'membeship_start',
string = 'Membership Start Date', type = 'date',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10, ),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date from which membership becomes active."),
'membership_stop': fields.function(
_membership_date,
string = 'Membership End Date', type='date', multi='membership_stop',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date until which membership remains active."),
'membership_cancel': fields.function(
_membership_date,
string = 'Cancel Membership Date', type='date', multi='membership_cancel',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 11),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date on which membership has been cancelled"),
}
_defaults = {
'free_member': False,
'membership_cancel': False,
}
def _check_recursion(self, cr, uid, ids, context=None):
"""Check Recursive for Associated Members.
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT associate_member FROM res_partner WHERE id IN %s', (tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive associated members.', ['associate_member'])
]
def create_membership_invoice(self, cr, uid, ids, product_id=None, datas=None, context=None):
""" Create Customer Invoice of Membership for partners.
@param datas: datas has dictionary value which consist Id of Membership product and Cost Amount of Membership.
datas = {'membership_product_id': None, 'amount': None}
"""
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_tax_obj = self.pool.get('account.invoice.tax')
product_id = product_id or datas.get('membership_product_id', False)
amount = datas.get('amount', 0.0)
invoice_list = []
if type(ids) in (int, long,):
ids = [ids]
for partner in self.browse(cr, uid, ids, context=context):
account_id = partner.property_account_receivable and partner.property_account_receivable.id or False
fpos_id = partner.property_account_position and partner.property_account_position.id or False
addr = self.address_get(cr, uid, [partner.id], ['invoice'])
if partner.free_member:
raise osv.except_osv(_('Error!'),
_("Partner is a free Member."))
if not addr.get('invoice', False):
raise osv.except_osv(_('Error!'),
_("Partner doesn't have an address to make the invoice."))
quantity = 1
line_value = {
'product_id': product_id,
}
line_dict = invoice_line_obj.product_id_change(cr, uid, {},
product_id, False, quantity, '', 'out_invoice', partner.id, fpos_id, price_unit=amount, context=context)
line_value.update(line_dict['value'])
line_value['price_unit'] = amount
if line_value.get('invoice_line_tax_id', False):
tax_tab = [(6, 0, line_value['invoice_line_tax_id'])]
line_value['invoice_line_tax_id'] = tax_tab
invoice_id = invoice_obj.create(cr, uid, {
'partner_id': partner.id,
'account_id': account_id,
'fiscal_position': fpos_id or False
}, context=context)
line_value['invoice_id'] = invoice_id
invoice_line_obj.create(cr, uid, line_value, context=context)
invoice_list.append(invoice_id)
            if line_value.get('invoice_line_tax_id'):
tax_value = invoice_tax_obj.compute(cr, uid, invoice_id).values()
for tax in tax_value:
invoice_tax_obj.create(cr, uid, tax, context=context)
        # Recompute the membership_state of those partners.
self.pool.get('res.partner').write(cr, uid, ids, {})
return invoice_list
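    # Minimal usage sketch (hypothetical values: `partner_obj` stands for
    # this res.partner model, and ids 7/42 are assumed to exist):
    #   partner_obj.create_membership_invoice(
    #       cr, uid, [7], datas={'membership_product_id': 42, 'amount': 80.0})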
class Product(osv.osv):
    '''Product'''
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
model_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
        if context.get('product') == 'membership_product':
model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context)
resource_id_form = model_obj.read(cr, user, model_data_ids_form, fields=['res_id', 'name'], context=context)
dict_model = {}
for i in resource_id_form:
dict_model[i['name']] = i['res_id']
if view_type == 'form':
view_id = dict_model['membership_products_form']
else:
view_id = dict_model['membership_products_tree']
        return super(Product, self).fields_view_get(cr, user, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
_inherit = 'product.template'
_columns = {
'membership': fields.boolean('Membership', help='Check if the product is eligible for membership.'),
'membership_date_from': fields.date('Membership Start Date', help='Date from which membership becomes active.'),
'membership_date_to': fields.date('Membership End Date', help='Date until which membership remains active.'),
}
    _sql_constraints = [('membership_date_greater', 'check(membership_date_to >= membership_date_from)', 'Error! Ending Date cannot be set before Beginning Date.')]
_defaults = {
'membership': False,
}
class Invoice(osv.osv):
'''Invoice'''
_inherit = 'account.invoice'
def action_cancel(self, cr, uid, ids, context=None):
'''Create a 'date_cancel' on the membership_line object'''
member_line_obj = self.pool.get('membership.membership_line')
today = time.strftime('%Y-%m-%d')
for invoice in self.browse(cr, uid, ids, context=context):
mlines = member_line_obj.search(cr, uid,
[('account_invoice_line', 'in',
[l.id for l in invoice.invoice_line])])
member_line_obj.write(cr, uid, mlines, {'date_cancel': today})
return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
class account_invoice_line(osv.osv):
    _inherit = 'account.invoice.line'
def write(self, cr, uid, ids, vals, context=None):
"""Overrides orm write method
"""
member_line_obj = self.pool.get('membership.membership_line')
res = super(account_invoice_line, self).write(cr, uid, ids, vals, context=context)
for line in self.browse(cr, uid, ids, context=context):
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line has changed to a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
                    if date_from < line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id.id,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
if line.product_id and not line.product_id.membership and ml_ids:
# Product line has changed to a non membership product
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
"""Remove Membership Line Record for Account Invoice Line
"""
member_line_obj = self.pool.get('membership.membership_line')
for id in ids:
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', id)], context=context)
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return super(account_invoice_line, self).unlink(cr, uid, ids, context=context)
def create(self, cr, uid, vals, context=None):
"""Overrides orm create method
"""
member_line_obj = self.pool.get('membership.membership_line')
result = super(account_invoice_line, self).create(cr, uid, vals, context=context)
line = self.browse(cr, uid, result, context=context)
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line is a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
                if date_from < line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id and line.invoice_id.partner_id.id or False,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,196,029,208,628,437,000 | 49.089606 | 207 | 0.546297 | false |
XiaosongWei/crosswalk-test-suite | apptools/apptools-android-tests/apptools/create_basic.py | 3 | 2943 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<[email protected]>
# Yun, Liu<[email protected]>
import unittest
import os
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_dir_exist(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
cmd = comm.HOST_PREFIX + comm.PackTools + \
"crosswalk-app create org.xwalk.test --android-crosswalk=" + \
comm.crosswalkVersion
return_code = os.system(cmd)
        self.assertNotEqual(return_code, 0)
comm.clear("org.xwalk.test")
    def test_main_activity(self):
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test/prj/android')
        fp = open(os.getcwd() + '/AndroidManifest.xml')
        lines = fp.readlines()
        fp.close()
        found = False
        for line in lines:
            line = line.strip(' ').strip('\n\t')
            if "<activity" in line:
                found = True
                start = line.index("name")
                self.assertIn('MainActivity', line[start:])
                break
        self.assertTrue(found, "<activity> element not found in AndroidManifest.xml")
        comm.clear("org.xwalk.test")
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 3,077,379,759,572,491,000 | 38.77027 | 80 | 0.66055 | false |
williamthegrey/swift | test/unit/common/middleware/test_xprofile.py | 5 | 23334 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import tempfile
import unittest
from nose import SkipTest
from six import BytesIO
from swift import gettext_ as _
from swift.common.swob import Request, Response
try:
from swift.common.middleware import xprofile
from swift.common.middleware.xprofile import ProfileMiddleware
from swift.common.middleware.x_profile.exceptions import (
MethodNotAllowed, NotFoundException, ODFLIBNotInstalled,
PLOTLIBNotInstalled)
from swift.common.middleware.x_profile.html_viewer import (
HTMLViewer, PLOTLIB_INSTALLED)
from swift.common.middleware.x_profile.profile_model import (
ODFLIB_INSTALLED, ProfileLog, Stats2)
except ImportError:
xprofile = None
class FakeApp(object):
def __call__(self, env, start_response):
req = Request(env)
return Response(request=req, body='FAKE APP')(
env, start_response)
class TestXProfile(unittest.TestCase):
def test_get_profiler(self):
if xprofile is None:
raise SkipTest
self.assertTrue(xprofile.get_profiler('cProfile') is not None)
self.assertTrue(xprofile.get_profiler('eventlet.green.profile')
is not None)
class TestProfilers(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.profilers = [xprofile.get_profiler('cProfile'),
xprofile.get_profiler('eventlet.green.profile')]
def fake_func(self, *args, **kw):
return len(args) + len(kw)
def test_runcall(self):
for p in self.profilers:
v = p.runcall(self.fake_func, 'one', 'two', {'key1': 'value1'})
self.assertEqual(v, 3)
def test_runctx(self):
for p in self.profilers:
p.runctx('import os;os.getcwd();', globals(), locals())
p.snapshot_stats()
self.assertTrue(p.stats is not None)
self.assertTrue(len(p.stats.keys()) > 0)
class TestProfileMiddleware(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.got_statuses = []
self.app = ProfileMiddleware(FakeApp, {})
self.tempdir = os.path.dirname(self.app.log_filename_prefix)
self.pids = ['123', '456', str(os.getpid())]
profiler = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids:
path = self.app.log_filename_prefix + pid
profiler.runctx('import os;os.getcwd();', globals(), locals())
profiler.dump_stats(path)
profiler.runctx('import os;os.getcwd();', globals(), locals())
profiler.dump_stats(path + '.tmp')
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def get_app(self, app, global_conf, **local_conf):
factory = xprofile.filter_factory(global_conf, **local_conf)
return factory(app)
def start_response(self, status, headers):
self.got_statuses = [status]
self.headers = headers
def test_combine_body_qs(self):
body = (b"profile=all&sort=time&limit=-1&fulldirs=1"
b"&nfl_filter=__call__&query=query&metric=nc&format=default")
wsgi_input = BytesIO(body)
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'profile=all&format=json',
'wsgi.input': wsgi_input}
req = Request.blank('/__profile__/', environ=environ)
query_dict = self.app._combine_body_qs(req)
self.assertEqual(query_dict['profile'], ['all'])
self.assertEqual(query_dict['sort'], ['time'])
self.assertEqual(query_dict['limit'], ['-1'])
self.assertEqual(query_dict['fulldirs'], ['1'])
self.assertEqual(query_dict['nfl_filter'], ['__call__'])
self.assertEqual(query_dict['query'], ['query'])
self.assertEqual(query_dict['metric'], ['nc'])
self.assertEqual(query_dict['format'], ['default'])
def test_call(self):
body = b"sort=time&limit=-1&fulldirs=1&nfl_filter=&metric=nc"
wsgi_input = BytesIO(body + b'&query=query')
environ = {'HTTP_HOST': 'localhost:8080',
'PATH_INFO': '/__profile__',
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'profile=all&format=json',
'wsgi.input': wsgi_input}
resp = self.app(environ, self.start_response)
self.assertTrue(resp[0].find('<html>') > 0, resp)
self.assertEqual(self.got_statuses, ['200 OK'])
self.assertEqual(self.headers, [('content-type', 'text/html')])
wsgi_input = BytesIO(body + b'&plot=plot')
environ['wsgi.input'] = wsgi_input
if PLOTLIB_INSTALLED:
resp = self.app(environ, self.start_response)
self.assertEqual(self.got_statuses, ['200 OK'])
self.assertEqual(self.headers, [('content-type', 'image/jpg')])
else:
resp = self.app(environ, self.start_response)
self.assertEqual(self.got_statuses, ['500 Internal Server Error'])
        wsgi_input = BytesIO(body + b'&download=download&format=default')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
self.assertEqual(self.headers, [('content-type',
HTMLViewer.format_dict['default'])])
        wsgi_input = BytesIO(body + b'&download=download&format=json')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
        self.assertEqual(self.headers,
                         [('content-type', HTMLViewer.format_dict['json'])])
env2 = environ.copy()
env2['REQUEST_METHOD'] = 'DELETE'
resp = self.app(env2, self.start_response)
self.assertEqual(self.got_statuses, ['405 Method Not Allowed'], resp)
# use a totally bogus profile identifier
wsgi_input = BytesIO(body + b'&profile=ABC&download=download')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
self.assertEqual(self.got_statuses, ['404 Not Found'], resp)
wsgi_input = BytesIO(body + b'&download=download&format=ods')
environ['wsgi.input'] = wsgi_input
resp = self.app(environ, self.start_response)
if ODFLIB_INSTALLED:
self.assertEqual(self.headers, [('content-type',
HTMLViewer.format_dict['ods'])])
else:
self.assertEqual(self.got_statuses, ['500 Internal Server Error'])
def test_dump_checkpoint(self):
self.app.dump_checkpoint()
self.assertTrue(self.app.last_dump_at is not None)
def test_renew_profile(self):
old_profiler = self.app.profiler
self.app.renew_profile()
new_profiler = self.app.profiler
self.assertTrue(old_profiler != new_profiler)
class Test_profile_log(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.dir1 = tempfile.mkdtemp()
self.log_filename_prefix1 = self.dir1 + '/unittest.profile'
self.profile_log1 = ProfileLog(self.log_filename_prefix1, False)
self.pids1 = ['123', '456', str(os.getpid())]
profiler1 = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids1:
profiler1.runctx('import os;os.getcwd();', globals(), locals())
self.profile_log1.dump_profile(profiler1, pid)
self.dir2 = tempfile.mkdtemp()
self.log_filename_prefix2 = self.dir2 + '/unittest.profile'
self.profile_log2 = ProfileLog(self.log_filename_prefix2, True)
self.pids2 = ['321', '654', str(os.getpid())]
profiler2 = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids2:
profiler2.runctx('import os;os.getcwd();', globals(), locals())
self.profile_log2.dump_profile(profiler2, pid)
def tearDown(self):
self.profile_log1.clear('all')
self.profile_log2.clear('all')
shutil.rmtree(self.dir1, ignore_errors=True)
shutil.rmtree(self.dir2, ignore_errors=True)
def test_get_all_pids(self):
self.assertEqual(self.profile_log1.get_all_pids(),
sorted(self.pids1, reverse=True))
for pid in self.profile_log2.get_all_pids():
self.assertTrue(pid.split('-')[0] in self.pids2)
def test_clear(self):
self.profile_log1.clear('123')
self.assertFalse(os.path.exists(self.log_filename_prefix1 + '123'))
self.profile_log1.clear('current')
self.assertFalse(os.path.exists(self.log_filename_prefix1 +
str(os.getpid())))
self.profile_log1.clear('all')
for pid in self.pids1:
self.assertFalse(os.path.exists(self.log_filename_prefix1 + pid))
self.profile_log2.clear('321')
self.assertFalse(os.path.exists(self.log_filename_prefix2 + '321'))
self.profile_log2.clear('current')
self.assertFalse(os.path.exists(self.log_filename_prefix2 +
str(os.getpid())))
self.profile_log2.clear('all')
for pid in self.pids2:
self.assertFalse(os.path.exists(self.log_filename_prefix2 + pid))
def test_get_logfiles(self):
log_files = self.profile_log1.get_logfiles('all')
self.assertEqual(len(log_files), 3)
self.assertEqual(len(log_files), len(self.pids1))
log_files = self.profile_log1.get_logfiles('current')
self.assertEqual(len(log_files), 1)
self.assertEqual(log_files, [self.log_filename_prefix1
+ str(os.getpid())])
log_files = self.profile_log1.get_logfiles(self.pids1[0])
self.assertEqual(len(log_files), 1)
self.assertEqual(log_files, [self.log_filename_prefix1
+ self.pids1[0]])
log_files = self.profile_log2.get_logfiles('all')
self.assertEqual(len(log_files), 3)
self.assertEqual(len(log_files), len(self.pids2))
log_files = self.profile_log2.get_logfiles('current')
self.assertEqual(len(log_files), 1)
self.assertTrue(log_files[0].find(self.log_filename_prefix2 +
str(os.getpid())) > -1)
log_files = self.profile_log2.get_logfiles(self.pids2[0])
self.assertEqual(len(log_files), 1)
self.assertTrue(log_files[0].find(self.log_filename_prefix2 +
self.pids2[0]) > -1)
def test_dump_profile(self):
prof = xprofile.get_profiler('eventlet.green.profile')
prof.runctx('import os;os.getcwd();', globals(), locals())
prof.create_stats()
pfn = self.profile_log1.dump_profile(prof, os.getpid())
self.assertTrue(os.path.exists(pfn))
os.remove(pfn)
pfn = self.profile_log2.dump_profile(prof, os.getpid())
self.assertTrue(os.path.exists(pfn))
os.remove(pfn)
class Test_html_viewer(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.app = ProfileMiddleware(FakeApp, {})
self.log_files = []
self.tempdir = tempfile.mkdtemp()
self.log_filename_prefix = self.tempdir + '/unittest.profile'
self.profile_log = ProfileLog(self.log_filename_prefix, False)
self.pids = ['123', '456', str(os.getpid())]
profiler = xprofile.get_profiler('eventlet.green.profile')
for pid in self.pids:
profiler.runctx('import os;os.getcwd();', globals(), locals())
self.log_files.append(self.profile_log.dump_profile(profiler, pid))
self.viewer = HTMLViewer('__profile__', 'eventlet.green.profile',
self.profile_log)
body = (b"profile=123&profile=456&sort=time&sort=nc&limit=10"
b"&fulldirs=1&nfl_filter=getcwd&query=query&metric=nc")
wsgi_input = BytesIO(body)
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'profile=all',
'wsgi.input': wsgi_input}
req = Request.blank('/__profile__/', environ=environ)
self.query_dict = self.app._combine_body_qs(req)
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def fake_call_back(self):
pass
def test_get_param(self):
query_dict = self.query_dict
get_param = self.viewer._get_param
self.assertEqual(get_param(query_dict, 'profile', 'current', True),
['123', '456'])
self.assertEqual(get_param(query_dict, 'profile', 'current'), '123')
self.assertEqual(get_param(query_dict, 'sort', 'time'), 'time')
self.assertEqual(get_param(query_dict, 'sort', 'time', True),
['time', 'nc'])
self.assertEqual(get_param(query_dict, 'limit', -1), 10)
self.assertEqual(get_param(query_dict, 'fulldirs', '0'), '1')
self.assertEqual(get_param(query_dict, 'nfl_filter', ''), 'getcwd')
self.assertEqual(get_param(query_dict, 'query', ''), 'query')
self.assertEqual(get_param(query_dict, 'metric', 'time'), 'nc')
self.assertEqual(get_param(query_dict, 'format', 'default'), 'default')
def test_render(self):
url = 'http://localhost:8080/__profile__'
path_entries = ['/__profile__'.split('/'),
'/__profile__/'.split('/'),
'/__profile__/123'.split('/'),
'/__profile__/123/'.split('/'),
'/__profile__/123/:0(getcwd)'.split('/'),
'/__profile__/all'.split('/'),
'/__profile__/all/'.split('/'),
'/__profile__/all/:0(getcwd)'.split('/'),
'/__profile__/current'.split('/'),
'/__profile__/current/'.split('/'),
'/__profile__/current/:0(getcwd)'.split('/')]
content, headers = self.viewer.render(url, 'GET', path_entries[0],
self.query_dict, None)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type', 'text/html')])
content, headers = self.viewer.render(url, 'POST', path_entries[0],
self.query_dict, None)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type', 'text/html')])
plot_dict = self.query_dict.copy()
plot_dict['plot'] = ['plot']
if PLOTLIB_INSTALLED:
content, headers = self.viewer.render(url, 'POST', path_entries[0],
plot_dict, None)
self.assertEqual(headers, [('content-type', 'image/jpg')])
else:
self.assertRaises(PLOTLIBNotInstalled, self.viewer.render,
url, 'POST', path_entries[0], plot_dict, None)
clear_dict = self.query_dict.copy()
clear_dict['clear'] = ['clear']
del clear_dict['query']
clear_dict['profile'] = ['xxx']
content, headers = self.viewer.render(url, 'POST', path_entries[0],
clear_dict, None)
self.assertEqual(headers, [('content-type', 'text/html')])
download_dict = self.query_dict.copy()
download_dict['download'] = ['download']
content, headers = self.viewer.render(url, 'POST', path_entries[0],
download_dict, None)
        self.assertEqual(headers,
                         [('content-type',
                           self.viewer.format_dict['default'])])
content, headers = self.viewer.render(url, 'GET', path_entries[1],
self.query_dict, None)
self.assertTrue(isinstance(json.loads(content), dict))
for method in ['HEAD', 'PUT', 'DELETE', 'XYZMethod']:
self.assertRaises(MethodNotAllowed, self.viewer.render, url,
method, path_entries[10], self.query_dict, None)
for entry in path_entries[2:]:
download_dict['format'] = 'default'
content, headers = self.viewer.render(url, 'GET', entry,
download_dict, None)
self.assertTrue(
('content-type', self.viewer.format_dict['default'])
in headers, entry)
download_dict['format'] = 'json'
content, headers = self.viewer.render(url, 'GET', entry,
download_dict, None)
self.assertTrue(isinstance(json.loads(content), dict))
def test_index(self):
content, headers = self.viewer.index_page(self.log_files[0:1],
profile_id='current')
self.assertTrue(content.find('<html>') > -1)
        self.assertEqual(headers, [('content-type', 'text/html')])
def test_index_all(self):
content, headers = self.viewer.index_page(self.log_files,
profile_id='all')
for f in self.log_files:
self.assertTrue(content.find(f) > 0, content)
        self.assertEqual(headers, [('content-type', 'text/html')])
def test_download(self):
content, headers = self.viewer.download(self.log_files)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.download(self.log_files, sort='calls',
limit=10, nfl_filter='os')
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.download(self.log_files,
output_format='default')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['default'])])
content, headers = self.viewer.download(self.log_files,
output_format='json')
self.assertTrue(isinstance(json.loads(content), dict))
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['json'])])
content, headers = self.viewer.download(self.log_files,
output_format='csv')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['csv'])])
if ODFLIB_INSTALLED:
content, headers = self.viewer.download(self.log_files,
output_format='ods')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['ods'])])
else:
self.assertRaises(ODFLIBNotInstalled, self.viewer.download,
self.log_files, output_format='ods')
content, headers = self.viewer.download(self.log_files,
nfl_filter=__file__,
output_format='python')
self.assertEqual(headers, [('content-type',
self.viewer.format_dict['python'])])
def test_plot(self):
if PLOTLIB_INSTALLED:
content, headers = self.viewer.plot(self.log_files)
self.assertTrue(content is not None)
self.assertEqual(headers, [('content-type', 'image/jpg')])
self.assertRaises(NotFoundException, self.viewer.plot, [])
else:
self.assertRaises(PLOTLIBNotInstalled, self.viewer.plot,
self.log_files)
def test_format_source_code(self):
osfile = os.__file__.rstrip('c')
nfl_os = '%s:%d(%s)' % (osfile, 136, 'makedirs')
self.assertIn('makedirs', self.viewer.format_source_code(nfl_os))
self.assertNotIn('makedirsXYZ', self.viewer.format_source_code(nfl_os))
nfl_illegal = '%sc:136(makedirs)' % osfile
self.assertIn(_('The file type are forbidden to access!'),
self.viewer.format_source_code(nfl_illegal))
nfl_not_exist = '%s.py:136(makedirs)' % osfile
expected_msg = _('Can not access the file %s.py.') % osfile
self.assertIn(expected_msg,
self.viewer.format_source_code(nfl_not_exist))
class TestStats2(unittest.TestCase):
def setUp(self):
if xprofile is None:
raise SkipTest
self.profile_file = tempfile.mktemp('profile', 'unittest')
self.profilers = [xprofile.get_profiler('cProfile'),
xprofile.get_profiler('eventlet.green.profile')]
for p in self.profilers:
p.runctx('import os;os.getcwd();', globals(), locals())
p.dump_stats(self.profile_file)
self.stats2 = Stats2(self.profile_file)
self.selections = [['getcwd'], ['getcwd', -1],
['getcwd', -10], ['getcwd', 0.1]]
def tearDown(self):
os.remove(self.profile_file)
def test_func_to_dict(self):
func = ['profile.py', 100, '__call__']
self.assertEqual({'module': 'profile.py', 'line': 100, 'function':
'__call__'}, self.stats2.func_to_dict(func))
func = ['', 0, '__call__']
self.assertEqual({'module': '', 'line': 0, 'function':
'__call__'}, self.stats2.func_to_dict(func))
def test_to_json(self):
for selection in self.selections:
js = self.stats2.to_json(selection)
self.assertTrue(isinstance(json.loads(js), dict))
self.assertTrue(json.loads(js)['stats'] is not None)
self.assertTrue(json.loads(js)['stats'][0] is not None)
def test_to_ods(self):
if ODFLIB_INSTALLED:
for selection in self.selections:
self.assertTrue(self.stats2.to_ods(selection) is not None)
def test_to_csv(self):
for selection in self.selections:
self.assertTrue(self.stats2.to_csv(selection) is not None)
self.assertTrue('function calls' in self.stats2.to_csv(selection))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,307,352,246,750,098,000 | 43.959538 | 79 | 0.566984 | false |
ttanner/kryptomime | tests/test_smime.py | 1 | 3239 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# S/MIME unit tests
#
# This file is part of kryptomime, a Python module for email kryptography.
# Copyright © 2013,2014 Thomas Tanner <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For more details see the file COPYING.
from pytest import fixture, mark, raises
from kryptomime import KeyMissingError
from kryptomime.mail import create_mail, protect_mail
from kryptomime.smime import OpenSMIME, Certificate, PrivateKey, MemoryKeyStore, OpenSSL, OpenSSL_CA
import email.mime.text
from conftest import sender, receiver
from test_openssl import x509keys, openssl
passphrase='mysecret'
attachment = email.mime.text.MIMEText('some\nattachment')
msg = create_mail(sender,receiver,'subject','body\nmessage')
msg.epilogue=''
msgatt = create_mail(sender,receiver,'subject','body\nmessage',attach=[attachment])
msgrev = create_mail(receiver,sender,'subject','body\nmessage')
msgself = create_mail(sender,sender,'subject','body\nmessage')
prot = protect_mail(msg,linesep='\r\n')
protatt = protect_mail(msgatt,linesep='\r\n')
def compare_mail(a, b):
    if type(a) == str:
        assert a == b
        return
    assert a.is_multipart() == b.is_multipart()
#from kryptomime.mail import ProtectedMessage
#assert isinstance(a,ProtectedMessage)==isinstance(b,ProtectedMessage)
# todo headers
if a.is_multipart():
for i in range(len(a.get_payload())):
ap = a.get_payload(i)
bp = b.get_payload(i)
assert ap.as_string() == bp.as_string()
else:
assert a.get_payload() == b.get_payload()
@fixture(scope='module')
def smimesender(x509keys,openssl):
return (OpenSMIME(openssl=openssl,default_key=x509keys[0]),x509keys[0].cacerts)
@fixture(scope='module')
def smimereceiver(x509keys,openssl):
return (OpenSMIME(openssl=openssl,default_key=x509keys[1]),x509keys[0].cacerts)
@mark.parametrize("attach", [False,True])
def test_sign(x509keys, attach, smimesender, smimereceiver):
id1, cacert1 = smimesender
id2, cacert2 = smimereceiver
mail = protatt if attach else prot
sgn = id1.sign(mail)
vfy, signer, valid = id2.verify(sgn,cacerts=cacert1)
assert valid and x509keys[0].cert == signer
compare_mail(mail,vfy)
@mark.parametrize("sign", [False,True])
def test_encrypt(x509keys, sign, smimesender, smimereceiver):
id1, cacert1 = smimesender
id2, cacert2 = smimereceiver
enc = id1.encrypt(protatt,[x509keys[1]],sign=sign, verify=True)
dec = id2.decrypt(enc,verify=sign,cacerts=cacert1)
if sign:
dec, signer, valid = dec
assert valid and x509keys[0].cert == signer
compare_mail(protatt,dec)
| lgpl-3.0 | 1,652,476,232,261,151,500 | 37.547619 | 100 | 0.722977 | false |
Pythonity/icon-font-to-png | icon_font_to_png/icon_font.py | 1 | 6185 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import re
from collections import OrderedDict
import tinycss
from PIL import Image, ImageFont, ImageDraw
from six import unichr
class IconFont(object):
"""Base class that represents web icon font"""
def __init__(self, css_file, ttf_file, keep_prefix=False):
"""
:param css_file: path to icon font CSS file
:param ttf_file: path to icon font TTF file
:param keep_prefix: whether to keep common icon prefix
"""
self.css_file = css_file
self.ttf_file = ttf_file
self.keep_prefix = keep_prefix
self.css_icons, self.common_prefix = self.load_css()
def load_css(self):
"""
Creates a dict of all icons available in CSS file, and finds out
what's their common prefix.
        :returns: sorted icons dict, common icon prefix
"""
icons = dict()
common_prefix = None
parser = tinycss.make_parser('page3')
stylesheet = parser.parse_stylesheet_file(self.css_file)
        is_icon = re.compile(r"\.(.*):before,?")
for rule in stylesheet.rules:
selector = rule.selector.as_css()
# Skip CSS classes that are not icons
if not is_icon.match(selector):
continue
# Find out what the common prefix is
if common_prefix is None:
common_prefix = selector[1:]
else:
common_prefix = os.path.commonprefix((common_prefix,
selector[1:]))
for match in is_icon.finditer(selector):
name = match.groups()[0]
for declaration in rule.declarations:
if declaration.name == "content":
val = declaration.value.as_css()
# Strip quotation marks
if re.match("^['\"].*['\"]$", val):
val = val[1:-1]
icons[name] = unichr(int(val[1:], 16))
common_prefix = common_prefix or ''
# Remove common prefix
if not self.keep_prefix and len(common_prefix) > 0:
non_prefixed_icons = {}
for name in icons.keys():
non_prefixed_icons[name[len(common_prefix):]] = icons[name]
icons = non_prefixed_icons
sorted_icons = OrderedDict(sorted(icons.items(), key=lambda t: t[0]))
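        # With Font Awesome, for example, the result holds entries such as
        # {'github': u'\uf09b', ...} (icon name -> glyph), with the shared
        # 'fa-' prefix stripped unless keep_prefix was requested.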
return sorted_icons, common_prefix
def export_icon(self, icon, size, color='black', scale='auto',
filename=None, export_dir='exported'):
"""
Exports given icon with provided parameters.
If the desired icon size is less than 150x150 pixels, we will first
create a 150x150 pixels image and then scale it down, so that
it's much less likely that the edges of the icon end up cropped.
:param icon: valid icon name
:param filename: name of the output file
:param size: icon size in pixels
:param color: color name or hex value
:param scale: scaling factor between 0 and 1,
or 'auto' for automatic scaling
:param export_dir: path to export directory
"""
org_size = size
size = max(150, size)
image = Image.new("RGBA", (size, size), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(image)
if scale == 'auto':
scale_factor = 1
else:
scale_factor = float(scale)
font = ImageFont.truetype(self.ttf_file, int(size * scale_factor))
width, height = draw.textsize(self.css_icons[icon], font=font)
# If auto-scaling is enabled, we need to make sure the resulting
# graphic fits inside the boundary. The values are rounded and may be
# off by a pixel or two, so we may need to do a few iterations.
# The use of a decrementing multiplication factor protects us from
# getting into an infinite loop.
if scale == 'auto':
iteration = 0
factor = 1
while True:
width, height = draw.textsize(self.css_icons[icon], font=font)
# Check if the image fits
dim = max(width, height)
if dim > size:
font = ImageFont.truetype(self.ttf_file,
int(size * size/dim * factor))
else:
break
# Adjust the factor every two iterations
iteration += 1
if iteration % 2 == 0:
factor *= 0.99
draw.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=color)
# Get bounding box
bbox = image.getbbox()
# Create an alpha mask
image_mask = Image.new("L", (size, size), 0)
draw_mask = ImageDraw.Draw(image_mask)
# Draw the icon on the mask
draw_mask.text((float(size - width) / 2, float(size - height) / 2),
self.css_icons[icon], font=font, fill=255)
# Create a solid color image and apply the mask
icon_image = Image.new("RGBA", (size, size), color)
icon_image.putalpha(image_mask)
if bbox:
icon_image = icon_image.crop(bbox)
border_w = int((size - (bbox[2] - bbox[0])) / 2)
border_h = int((size - (bbox[3] - bbox[1])) / 2)
# Create output image
out_image = Image.new("RGBA", (size, size), (0, 0, 0, 0))
out_image.paste(icon_image, (border_w, border_h))
# If necessary, scale the image to the target size
if org_size != size:
out_image = out_image.resize((org_size, org_size), Image.ANTIALIAS)
# Make sure export directory exists
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# Default filename
if not filename:
filename = icon + '.png'
# Save file
out_image.save(os.path.join(export_dir, filename))
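# Minimal usage sketch (hypothetical file names; assumes a Font Awesome
# CSS/TTF pair is available in the working directory):
#   icon_font = IconFont('font-awesome.css', 'fontawesome-webfont.ttf')
#   icon_font.export_icon('github', size=64, color='#333333')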
| mit | -5,177,220,675,095,560,000 | 34.342857 | 79 | 0.546645 | false |
h3biomed/ansible | lib/ansible/modules/storage/netapp/na_ontap_lun_copy.py | 28 | 5680 | #!/usr/bin/python
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_lun_copy
short_description: NetApp ONTAP copy LUNs
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.8'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Copy LUNs on NetApp ONTAP.
options:
state:
description:
- Whether the specified LUN should exist or not.
choices: ['present']
default: present
destination_vserver:
description:
- the name of the Vserver that will host the new LUN.
required: true
destination_path:
description:
- Specifies the full path to the new LUN.
required: true
source_path:
description:
- Specifies the full path to the source LUN.
required: true
source_vserver:
description:
- Specifies the name of the vserver hosting the LUN to be copied.
'''
EXAMPLES = """
- name: Copy LUN
na_ontap_lun_copy:
destination_vserver: ansible
destination_path: /vol/test/test_copy_dest_dest_new
source_path: /vol/test/test_copy_1
source_vserver: ansible
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.netapp_module import NetAppModule
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapLUNCopy(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present'], default='present'),
destination_vserver=dict(required=True, type='str'),
destination_path=dict(required=True, type='str'),
source_path=dict(required=True, type='str'),
source_vserver=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['destination_vserver'])
def get_lun(self):
"""
Check if the LUN exists
:return: true is it exists, false otherwise
:rtype: bool
"""
return_value = False
lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
query_details = netapp_utils.zapi.NaElement('lun-info')
query_details.add_new_child('path', self.parameters['destination_path'])
query_details.add_new_child('vserver', self.parameters['destination_vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
lun_info.add_child_elem(query)
try:
result = self.server.invoke_successfully(lun_info, True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error getting lun info %s for verver %s: %s" %
(self.parameters['destination_path'], self.parameters['destination_vserver'], to_native(e)),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
return_value = True
return return_value
def copy_lun(self):
"""
Copy LUN with requested path and vserver
"""
lun_copy = netapp_utils.zapi.NaElement.create_node_with_children(
'lun-copy-start', **{'source-vserver': self.parameters['source_vserver']})
path_obj = netapp_utils.zapi.NaElement('paths')
pair = netapp_utils.zapi.NaElement('lun-path-pair')
pair.add_new_child('destination-path', self.parameters['destination_path'])
pair.add_new_child('source-path', self.parameters['source_path'])
path_obj.add_child_elem(pair)
lun_copy.add_child_elem(path_obj)
try:
self.server.invoke_successfully(lun_copy, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg="Error copying lun from %s to vserver %s: %s" %
(self.parameters['source_vserver'], self.parameters['destination_vserver'], to_native(e)),
exception=traceback.format_exc())
def apply(self):
netapp_utils.ems_log_event("na_ontap_lun_copy", self.server)
        if self.get_lun():  # LUN already exists at destination
            changed = False
        else:
            changed = True
            if not self.module.check_mode:
                # need to copy the lun
                if self.parameters['state'] == 'present':
                    self.copy_lun()
self.module.exit_json(changed=changed)
def main():
v = NetAppOntapLUNCopy()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | 3,392,423,968,817,683,000 | 30.038251 | 130 | 0.619718 | false |
aabbox/kbengine | kbe/res/scripts/common/Lib/test/test__osx_support.py | 72 | 11776 | """
Test suite for _osx_support: shared OS X support functions.
"""
import os
import platform
import shutil
import stat
import sys
import unittest
import test.support
import _osx_support
@unittest.skipUnless(sys.platform.startswith("darwin"), "requires OS X")
class Test_OSXSupport(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.prog_name = 'bogus_program_xxxx'
self.temp_path_dir = os.path.abspath(os.getcwd())
self.env = test.support.EnvironmentVarGuard()
self.addCleanup(self.env.__exit__)
for cv in ('CFLAGS', 'LDFLAGS', 'CPPFLAGS',
'BASECFLAGS', 'BLDSHARED', 'LDSHARED', 'CC',
'CXX', 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS'):
if cv in self.env:
self.env.unset(cv)
def add_expected_saved_initial_values(self, config_vars, expected_vars):
# Ensure that the initial values for all modified config vars
# are also saved with modified keys.
expected_vars.update(('_OSX_SUPPORT_INITIAL_'+ k,
config_vars[k]) for k in config_vars
if config_vars[k] != expected_vars[k])
def test__find_executable(self):
if self.env['PATH']:
self.env['PATH'] = self.env['PATH'] + ':'
self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
test.support.unlink(self.prog_name)
self.assertIsNone(_osx_support._find_executable(self.prog_name))
self.addCleanup(test.support.unlink, self.prog_name)
with open(self.prog_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo OK\n")
os.chmod(self.prog_name, stat.S_IRWXU)
self.assertEqual(self.prog_name,
_osx_support._find_executable(self.prog_name))
def test__read_output(self):
if self.env['PATH']:
self.env['PATH'] = self.env['PATH'] + ':'
self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
test.support.unlink(self.prog_name)
self.addCleanup(test.support.unlink, self.prog_name)
with open(self.prog_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo ExpectedOutput\n")
os.chmod(self.prog_name, stat.S_IRWXU)
self.assertEqual('ExpectedOutput',
_osx_support._read_output(self.prog_name))
def test__find_build_tool(self):
out = _osx_support._find_build_tool('cc')
self.assertTrue(os.path.isfile(out),
'cc not found - check xcode-select')
def test__get_system_version(self):
self.assertTrue(platform.mac_ver()[0].startswith(
_osx_support._get_system_version()))
def test__remove_original_values(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = {
'CC': 'clang -pthreads',
}
cv = 'CC'
newvalue = 'clang -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertNotEqual(expected_vars, config_vars)
_osx_support._remove_original_values(config_vars)
self.assertEqual(expected_vars, config_vars)
def test__save_modified_value(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = {
'CC': 'clang -pthreads',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
cv = 'CC'
newvalue = 'clang -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertEqual(expected_vars, config_vars)
def test__save_modified_value_unchanged(self):
config_vars = {
'CC': 'gcc-test -pthreads',
}
expected_vars = config_vars.copy()
cv = 'CC'
newvalue = 'gcc-test -pthreads'
_osx_support._save_modified_value(config_vars, cv, newvalue)
self.assertEqual(expected_vars, config_vars)
def test__supports_universal_builds(self):
import platform
mac_ver_tuple = tuple(int(i) for i in
platform.mac_ver()[0].split('.')[0:2])
self.assertEqual(mac_ver_tuple >= (10, 4),
_osx_support._supports_universal_builds())
def test__find_appropriate_compiler(self):
compilers = (
('gcc-test', 'i686-apple-darwin11-llvm-gcc-4.2'),
('clang', 'clang version 3.1'),
)
config_vars = {
'CC': 'gcc-test -pthreads',
'CXX': 'cc++-test',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-test -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-test -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang -pthreads',
'CXX': 'clang++',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'clang -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'clang -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
for c_name, c_output in compilers:
test.support.unlink(c_name)
self.addCleanup(test.support.unlink, c_name)
with open(c_name, 'w') as f:
f.write("#!/bin/sh\n/bin/echo " + c_output)
os.chmod(c_name, stat.S_IRWXU)
self.assertEqual(expected_vars,
_osx_support._find_appropriate_compiler(
config_vars))
def test__remove_universal_flags(self):
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 ',
'LDFLAGS': ' -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -g',
'LDSHARED': 'gcc-4.0 -bundle -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._remove_universal_flags(
config_vars))
def test__remove_unsupported_archs(self):
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch i386 ',
'LDFLAGS': ' -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
c_name = 'clang'
test.support.unlink(c_name)
self.addCleanup(test.support.unlink, c_name)
# exit status 255 means no PPC support in this compiler chain
with open(c_name, 'w') as f:
f.write("#!/bin/sh\nexit 255")
os.chmod(c_name, stat.S_IRWXU)
self.assertEqual(expected_vars,
_osx_support._remove_unsupported_archs(
config_vars))
def test__override_all_archs(self):
self.env['ARCHFLAGS'] = '-arch x86_64'
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch x86_64',
'LDFLAGS': ' -g -arch x86_64',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -g -arch x86_64',
'LDSHARED': 'gcc-4.0 -bundle -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk -g -arch x86_64',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._override_all_archs(
config_vars))
def test__check_for_unavailable_sdk(self):
config_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.1.sdk',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk -g',
}
expected_vars = {
'CC': 'clang',
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
' ',
'LDFLAGS': '-arch ppc -arch i386 -g',
'CPPFLAGS': '-I. ',
'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
' -g',
}
self.add_expected_saved_initial_values(config_vars, expected_vars)
self.assertEqual(expected_vars,
_osx_support._check_for_unavailable_sdk(
config_vars))
def test_get_platform_osx(self):
# Note, get_platform_osx is currently tested more extensively
# indirectly by test_sysconfig and test_distutils
config_vars = {
'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
'-isysroot /Developer/SDKs/MacOSX10.1.sdk',
'MACOSX_DEPLOYMENT_TARGET': '10.6',
}
result = _osx_support.get_platform_osx(config_vars, ' ', ' ', ' ')
self.assertEqual(('macosx', '10.6', 'fat'), result)
def test_main():
if sys.platform == 'darwin':
test.support.run_unittest(Test_OSXSupport)
if __name__ == "__main__":
test_main()
| lgpl-3.0 | 230,854,177,398,231,500 | 40.907473 | 81 | 0.538978 | false |
tokenly/counterparty-lib | counterpartylib/lib/messages/execute.py | 3 | 7034 | #! /usr/bin/python3
"""Execute arbitrary data as a smart contract."""
import struct
import binascii
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import (util, config, exceptions)
from .scriptlib import (utils, blocks, processblock)
FORMAT = '>20sQQQ'
LENGTH = 44
ID = 101
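# FORMAT '>20sQQQ' packs a 20-byte contract id followed by three unsigned
# 64-bit integers (gasprice, startgas, value); 20 + 3 * 8 = 44 bytes = LENGTH.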
def initialise (db):
cursor = db.cursor()
# Executions
cursor.execute('''CREATE TABLE IF NOT EXISTS executions(
tx_index INTEGER UNIQUE,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
contract_id TEXT,
gas_price INTEGER,
gas_start INTEGER,
gas_cost INTEGER,
gas_remained INTEGER,
value INTEGER,
data BLOB,
output BLOB,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON executions(source)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx_hash_idx ON executions(tx_hash)
''')
# Contract Storage
cursor.execute('''CREATE TABLE IF NOT EXISTS storage(
contract_id TEXT,
key BLOB,
value BLOB,
FOREIGN KEY (contract_id) REFERENCES contracts(contract_id))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
                      contract_id_idx ON storage(contract_id)
''')
# Suicides
cursor.execute('''CREATE TABLE IF NOT EXISTS suicides(
contract_id TEXT PRIMARY KEY,
FOREIGN KEY (contract_id) REFERENCES contracts(contract_id))
''')
# Nonces
cursor.execute('''CREATE TABLE IF NOT EXISTS nonces(
address TEXT PRIMARY KEY,
nonce INTEGER)
''')
# Postqueue
cursor.execute('''CREATE TABLE IF NOT EXISTS postqueue(
message BLOB)
''')
def compose (db, source, contract_id, gasprice, startgas, value, payload_hex):
if not config.TESTNET: # TODO
return
payload = binascii.unhexlify(payload_hex)
if startgas < 0:
raise processblock.ContractError('negative startgas')
if gasprice < 0:
raise processblock.ContractError('negative gasprice')
# Pack.
data = struct.pack(config.TXTYPE_FORMAT, ID)
curr_format = FORMAT + '{}s'.format(len(payload))
data += struct.pack(curr_format, binascii.unhexlify(contract_id), gasprice, startgas, value, payload)
return (source, [], data)
class Transaction(object):
def __init__(self, tx, to, gasprice, startgas, value, data):
assert type(data) == bytes
self.block_index = tx['block_index']
self.tx_hash = tx['tx_hash']
self.tx_index = tx['tx_index']
self.sender = tx['source']
self.data = data
self.to = to
self.gasprice = gasprice
self.startgas = startgas
self.value = value
self.timestamp = tx['block_time']
def hex_hash(self):
return '<None>'
def to_dict(self):
dict_ = {
'sender': self.sender,
'data': utils.hexprint(self.data),
'to': self.to,
'gasprice': self.gasprice,
'startgas': self.startgas,
'value': self.value
}
return dict_
def parse (db, tx, message):
if not config.TESTNET: # TODO
return
status = 'valid'
output, gas_cost, gas_remained = None, None, None
try:
# TODO: Use unpack function.
# Unpack message.
curr_format = FORMAT + '{}s'.format(len(message) - LENGTH)
try:
contract_id, gasprice, startgas, value, payload = struct.unpack(curr_format, message)
if gasprice > config.MAX_INT or startgas > config.MAX_INT: # TODO: define max for gasprice and startgas
raise exceptions.UnpackError()
except (struct.error) as e:
raise exceptions.UnpackError()
gas_remained = startgas
contract_id = util.hexlify(contract_id)
if contract_id == '0000000000000000000000000000000000000000':
contract_id = ''
# ‘Apply transaction’!
tx_obj = Transaction(tx, contract_id, gasprice, startgas, value, payload)
block_obj = blocks.Block(db, tx['block_hash'])
success, output, gas_remained = processblock.apply_transaction(db, tx_obj, block_obj)
if not success and output == '':
status = 'out of gas'
gas_cost = gasprice * (startgas - gas_remained) # different definition from pyethereum’s
except exceptions.UnpackError as e:
contract_id, gasprice, startgas, value, payload = None, None, None, None, None
status = 'invalid: could not unpack'
output = None
except processblock.ContractError as e:
status = 'invalid: no such contract'
contract_id = None
output = None
except processblock.InsufficientStartGas as e:
have, need = e.args
logger.debug('Insufficient start gas: have {} and need {}'.format(have, need))
status = 'invalid: insufficient start gas'
output = None
except processblock.InsufficientBalance as e:
have, need = e.args
logger.debug('Insufficient balance: have {} and need {}'.format(have, need))
status = 'invalid: insufficient balance'
output = None
except processblock.OutOfGas as e:
logger.debug('TX OUT_OF_GAS (startgas: {}, gas_remained: {})'.format(startgas, gas_remained))
status = 'out of gas'
output = None
finally:
if status == 'valid':
logger.debug('TX FINISHED (gas_remained: {})'.format(gas_remained))
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'contract_id': contract_id,
'gasprice': gasprice,
'startgas': startgas,
'gas_cost': gas_cost,
'gas_remained': gas_remained,
'value': value,
'payload': payload,
'output': output,
'status': status
}
        sql = 'insert into executions values(:tx_index, :tx_hash, :block_index, :source, :contract_id, :gasprice, :startgas, :gas_cost, :gas_remained, :value, :payload, :output, :status)'
cursor = db.cursor()
cursor.execute(sql, bindings)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -5,049,471,412,072,452,000 | 34.846939 | 182 | 0.557928 | false |
whereismyjetpack/ansible | lib/ansible/modules/cloud/ovirt/ovirt_snapshots_facts.py | 13 | 4318 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_snapshots_facts
short_description: Retrieve facts about one or more oVirt virtual machine snapshots
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt virtual machine snapshots."
notes:
- "This module creates a new top-level C(ovirt_snapshots) fact, which
contains a list of snapshots."
options:
vm:
description:
- "Name of the VM with snapshot."
required: true
description:
description:
- "Description of the snapshot, can be used as glob expression."
snapshot_id:
description:
- "Id of the snaphost we want to retrieve facts about."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all snapshots which description start with C(update) for VM named C(centos7):
- ovirt_snapshots_facts:
vm: centos7
description: update*
- debug:
var: ovirt_snapshots
'''
RETURN = '''
ovirt_snapshots:
description: "List of dictionaries describing the snapshot. Snapshot attribtues are mapped to dictionary keys,
all snapshot attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/snapshot."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
vm=dict(required=True),
description=dict(default=None),
snapshot_id=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
vms_service = connection.system_service().vms_service()
vm_name = module.params['vm']
vm = search_by_name(vms_service, vm_name)
if vm is None:
raise Exception("VM '%s' was not found." % vm_name)
snapshots_service = vms_service.service(vm.id).snapshots_service()
if module.params['description']:
snapshots = [
e for e in snapshots_service.list()
if fnmatch.fnmatch(e.description, module.params['description'])
]
elif module.params['snapshot_id']:
snapshots = [
snapshots_service.snapshot_service(module.params['snapshot_id']).get()
]
else:
snapshots = snapshots_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_snapshots=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in snapshots
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,357,778,639,397,574,700 | 30.75 | 138 | 0.6327 | false |
JPFrancoia/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
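# BernoulliRBM models binary (in practice, [0, 1]-valued) visible units,
# hence the rescaling of the digit features above.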
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # that many iterations are needed for Gibbs sampling to converge here
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
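    # score_samples() flips one randomly chosen feature per sample and
    # returns n_features * log(sigmoid(FE(X_corrupted) - FE(X))), an
    # approximation of the log pseudo-likelihood.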
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause | -2,102,907,588,695,011,000 | 31.360825 | 79 | 0.607996 | false |