repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
cernops/nova
|
nova/tests/unit/objects/test_monitor_metric.py
|
11
|
3769
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova import objects
from nova.objects import fields
from nova.tests.unit.objects import test_objects
_ts_now = timeutils.utcnow()
_monitor_metric_spec = {
'name': fields.MonitorMetricType.CPU_FREQUENCY,
'value': 1000,
'timestamp': _ts_now.isoformat(),
'source': 'nova.virt.libvirt.driver'
}
_monitor_metric_perc_spec = {
'name': fields.MonitorMetricType.CPU_PERCENT,
'value': 0.17,
'timestamp': _ts_now.isoformat(),
'source': 'nova.virt.libvirt.driver'
}
_monitor_numa_metric_spec = {
'name': fields.MonitorMetricType.NUMA_MEM_BW_CURRENT,
'numa_membw_values': {"0": 10, "1": 43},
'timestamp': _ts_now.isoformat(),
'source': 'nova.virt.libvirt.driver'
}
_monitor_metric_list_spec = [_monitor_metric_spec]
class _TestMonitorMetricObject(object):
def test_monitor_metric_to_dict(self):
obj = objects.MonitorMetric(name='cpu.frequency',
value=1000,
timestamp=_ts_now,
source='nova.virt.libvirt.driver')
self.assertEqual(_monitor_metric_spec, obj.to_dict())
def test_monitor_metric_perc_to_dict(self):
"""Test to ensure division by 100.0 occurs on percentage value."""
obj = objects.MonitorMetric(name='cpu.percent',
value=17,
timestamp=_ts_now,
source='nova.virt.libvirt.driver')
self.assertEqual(_monitor_metric_perc_spec, obj.to_dict())
def test_monitor_metric_list_to_list(self):
obj = objects.MonitorMetric(name='cpu.frequency',
value=1000,
timestamp=_ts_now,
source='nova.virt.libvirt.driver')
list_obj = objects.MonitorMetricList(objects=[obj])
self.assertEqual(_monitor_metric_list_spec, list_obj.to_list())
def test_monitor_NUMA_metric_to_dict(self):
obj = objects.MonitorMetric(name='numa.membw.current',
numa_membw_values={"0": 10, "1": 43},
timestamp=_ts_now,
source='nova.virt.libvirt.driver')
self.assertEqual(_monitor_numa_metric_spec, obj.to_dict())
def test_conversion_in_monitor_metric_list_from_json(self):
spec_list = [_monitor_metric_spec, _monitor_metric_perc_spec]
metrics = objects.MonitorMetricList.from_json(
jsonutils.dumps(spec_list))
for metric, spec in zip(metrics, spec_list):
exp = spec['value']
if (spec['name'] in
objects.monitor_metric.FIELDS_REQUIRING_CONVERSION):
exp = spec['value'] * 100
self.assertEqual(exp, metric.value)
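    # --- Hedged note (editor addition) ---
    # FIELDS_REQUIRING_CONVERSION lists the percentage-type metrics whose values
    # to_dict() divides by 100.0; from_json() multiplies them back, e.g. the
    # 0.17 in _monitor_metric_perc_spec corresponds to 17 on the MonitorMetric object.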
class TestMonitorMetricObject(test_objects._LocalTest,
_TestMonitorMetricObject):
pass
class TestRemoteMonitorMetricObject(test_objects._RemoteTest,
_TestMonitorMetricObject):
pass
|
apache-2.0
|
tommy-u/chaco
|
examples/demo/logo.py
|
3
|
3210
|
""" LOGO overlay """
from __future__ import with_statement
from numpy import array, cos, invert, isnan, nan, pi, sin, vstack
from traits.api import Array, Enum, Float, Range
from traitsui.api import Group, Item, View
from enable.api import ColorTrait
from chaco.api import arg_find_runs, AbstractOverlay
class Turtle(AbstractOverlay):
x = Float
y = Float
angle = Range(0.0, 360.0, value=90.0) # degrees, clockwise
color = ColorTrait("blue")
line_color = ColorTrait("green")
size = Float(10.0)
path = Array
_pen = Enum("down", "up")
view = View(Group("x", "y", "angle", Item("color", style="custom"),
Item("line_color", style="custom"), "size",
orientation="vertical"))
def __init__(self, component=None, **traits):
super(Turtle, self).__init__(component=component, **traits)
if 'path' not in traits:
self.path = array([self.x, self.y], ndmin=2)
def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
self.render(gc, other_component)
def render_turtle(self, gc, component):
with gc:
x, y = component.map_screen(array([self.x, self.y], ndmin=2))[0]
gc.translate_ctm(x, y)
angle = self.angle * pi / 180.0
gc.rotate_ctm(angle)
gc.set_stroke_color(self.color_)
gc.set_fill_color(self.color_)
gc.begin_path()
gc.lines([[-0.707*self.size, 0.707*self.size],
[-0.707*self.size, -0.707*self.size],
[self.size, 0.0]])
gc.fill_path()
def render(self, gc, component):
# Uses the component to map our path into screen space
nan_mask = invert(isnan(self.path[:,0])).astype(int)
blocks = [b for b in arg_find_runs(nan_mask, "flat") if nan_mask[b[0]] != 0]
screen_pts = component.map_screen(self.path)
with gc:
gc.clip_to_rect(component.x, component.y, component.width, component.height)
gc.set_stroke_color(self.line_color_)
for start, end in blocks:
gc.begin_path()
gc.lines(screen_pts[start:end])
gc.stroke_path()
self.render_turtle(gc, component)
def pendown(self):
self._pen = "down"
self.path = vstack((self.path, [self.x, self.y]))
def penup(self):
self.path = vstack((self.path, [nan,nan]))
self._pen = "up"
def forward(self, amt):
angle = self.angle * pi / 180.0
self.x += amt * cos(angle)
self.y += amt * sin(angle)
if self._pen == "down":
self.path = vstack((self.path, [self.x, self.y]))
def back(self, amt):
self.forward(-amt)
def left(self, angle):
self.angle = (self.angle + angle) % 360
def right(self, angle):
self.angle = ((self.angle - angle) + 360) % 360
def clear(self):
self.path = array([self.x, self.y], ndmin=2)
def reset(self):
self.x = self.y = 0.0
self.angle = 90.0
self.clear()
def _anytrait_changed(self, trait, val):
self.component.request_redraw()
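# --- Hedged usage sketch (editor addition, not part of the original demo) ---
# The methods above implement a classic turtle walk: forward() moves along the
# current heading and appends to `path`, left()/right() adjust the heading, and
# penup() inserts a NaN break so render() draws separate line segments.
# Assuming a Turtle overlaid on some Chaco component (`some_plot` is hypothetical):
#
#     turtle = Turtle(component=some_plot)
#     turtle.pendown()
#     for _ in range(4):
#         turtle.forward(20)
#         turtle.left(90)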
|
bsd-3-clause
|
ericblau/ipf-xsede
|
ipf/glue2/sge.py
|
1
|
26848
|
###############################################################################
# Copyright 2011-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import subprocess
import datetime
import os
import re
import time
import xml.sax
import xml.sax.handler
from ipf.dt import *
from ipf.error import StepError
from ipf.log import LogFileWatcher
from . import computing_activity
from . import computing_manager
from . import computing_service
from . import computing_share
from . import execution_environment
from . import accelerator_environment
from . import computing_manager_accel_info
from . import computing_share_accel_info
#######################################################################################################################
class ComputingServiceStep(computing_service.ComputingServiceStep):
def __init__(self):
computing_service.ComputingServiceStep.__init__(self)
def _run(self):
service = computing_service.ComputingService()
service.Name = "SGE"
service.Capability = ["executionmanagement.jobexecution",
"executionmanagement.jobdescription",
"executionmanagement.jobmanager",
"executionmanagement.executionandplanning",
"executionmanagement.reservation",
]
service.Type = "org.teragrid.SGE"
service.QualityLevel = "production"
return service
#######################################################################################################################
class ComputingManagerStep(computing_manager.ComputingManagerStep):
def __init__(self):
computing_manager.ComputingManagerStep.__init__(self)
def _run(self):
manager = computing_manager.ComputingManager()
manager.ProductName = "SGE"
manager.Name = "SGE"
manager.Reservation = True
#self.BulkSubmission = True
return manager
#######################################################################################################################
class ComputingActivitiesStep(computing_activity.ComputingActivitiesStep):
def __init__(self):
computing_activity.ComputingActivitiesStep.__init__(self)
self._acceptParameter(
"qstat", "the path to the SGE qstat program (default 'qstat')", False)
def _run(self):
try:
qstat = self.params["qstat"]
except KeyError:
qstat = "qstat"
# the output of -u is in schedule order
cmd = qstat + " -xml -pri -s prsz -u \\*"
self.debug("running "+cmd)
status, output = subprocess.getstatusoutput(cmd)
if status != 0:
self.error("qstat failed: "+output+"\n")
raise StepError("qstat failed: "+output+"\n")
uhandler = JobsUHandler(self)
xml.sax.parseString(output, uhandler)
jobs = {}
for job in uhandler.jobs:
jobs[job.LocalIDFromManager] = job
cmd = qstat + " -xml -s prsz -j \\*"
self.debug("running "+cmd)
status, output = subprocess.getstatusoutput(cmd)
if status != 0:
self.error("qstat failed: "+output+"\n")
raise StepError("qstat failed: "+output+"\n")
# dom parsing was slow
# sax parsing failed sometimes
parseJLines(output, jobs, self)
jobList = []
for job in uhandler.jobs:
if self._includeQueue(job.Queue):
jobList.append(job)
return jobList
#######################################################################################################################
class JobsUHandler(xml.sax.handler.ContentHandler):
def __init__(self, step):
self.step = step
self.cur_job = None
self.jobs = []
self.cur_time = time.time()
self.job_state = ""
self.text = ""
def startDocument(self):
pass
def endDocument(self):
if self.cur_job is not None:
self.jobs.append(self.cur_job)
def startElement(self, name, attrs):
if name == "job_list":
self.job_state = attrs["state"]
def endElement(self, name):
self._handleElement(name)
# get ready for next element
self.text = ""
def _handleElement(self, name):
# get rid of whitespace on either side
self.text = self.text.lstrip().rstrip()
if name == "JB_job_number":
if self.cur_job is not None:
self.jobs.append(self.cur_job)
self.cur_job = computing_activity.ComputingActivity()
self.cur_job.LocalIDFromManager = self.text
if name == "state":
if self.text.find("r") >= 0:
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_RUNNING]
elif self.text.find("R") >= 0: # restarted
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_RUNNING]
elif self.text.find("d") >= 0: # deleted
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_TERMINATED]
elif self.text.find("E") >= 0: # error - Eqw
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_FAILED]
elif self.text.find("h") >= 0: # held - hqw, hr
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_HELD]
elif self.text.find("w") >= 0: # waiting - qw
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_PENDING]
elif self.text.find("t") >= 0: # transfering
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_PENDING]
else:
self.step.warning(
"found unknown SGE job state '" + self.text + "'")
self.cur_job.State = [
computing_activity.ComputingActivity.STATE_UNKNOWN]
self.cur_job.State.append("sge:"+self.job_state)
if name == "JAT_start_time":
self.cur_job.StartTime = _getDateTime(self.text)
# JAT_submission_time isn't provided for running jobs, so just get it from -j
def characters(self, ch):
# all of the text for an element may not come at once
self.text = self.text + ch
#######################################################################################################################
def parseJLines(output, jobs, step):
cur_time = time.time()
job_strings = []
index = output.find("<JB_job_number>")
while index >= 0:
next_index = output.find("<JB_job_number>", index+1)
if next_index == -1:
job_strings.append(output[index:])
else:
job_strings.append(output[index:next_index])
index = next_index
cur_job = None
for job_string in job_strings:
m = re.search("<JB_job_number>(\S+)</JB_job_number>", job_string)
if m is not None:
try:
cur_job = jobs[m.group(1)]
except KeyError:
continue
else:
continue
m = re.search("<JB_job_name>(\S+)</JB_job_name>", job_string)
if m is not None:
cur_job.Name = m.group(1)
m = re.search("<JB_owner>(\S+)</JB_owner>", job_string)
if m is not None:
cur_job.LocalOwner = m.group(1)
m = re.search("<JB_account>(\S+)</JB_account>", job_string)
if m is not None:
cur_job.Extension["LocalAccount"] = m.group(1)
m = re.search("<QR_name>(\S+)</QR_name>", job_string)
if m is not None:
cur_job.Queue = m.group(1)
m = re.search(
"<JB_submission_time>(\S+)</JB_submission_time>", job_string)
if m is not None:
cur_job.SubmissionTime = epochToDateTime(
int(m.group(1)), localtzoffset())
cur_job.ComputingManagerSubmissionTime = cur_job.SubmissionTime
else:
step.warning("didn't find submission time in %s", job_string)
m = re.search("<JB_pe_range>([\s\S]+)</JB_pe_range>", job_string)
if m is not None:
m = re.search("<RN_min>(\S+)</RN_min>", m.group(1))
if m is not None:
cur_job.RequestedSlots = int(m.group(1))
lstrings = re.findall(
"<qstat_l_requests>[\s\S]+?</qstat_l_requests>", job_string)
for str in lstrings:
if "h_rt" in str:
m = re.search("<CE_doubleval>(\S+)</CE_doubleval>", str)
if m is not None:
cur_job.RequestedTotalWallTime = cur_job.RequestedSlots * \
int(float(m.group(1)))
# start time isn't often in the -j output, so get it from -u
if cur_job.StartTime is not None:
usedWallTime = int(
cur_time - time.mktime(cur_job.StartTime.timetuple()))
cur_job.UsedTotalWallTime = usedWallTime * cur_job.RequestedSlots
# looks like PET_end_time isn't ever valid
sstrings = re.findall("<scaled>[\s\S]+?</scaled>", job_string)
for str in sstrings:
m = re.search("<UA_value>(\S+)</UA_value>", str)
if m is None:
continue
if "<UA_name>end_time</UA_name>" in str:
if int(float(m.group(1))) > 0:
cur_job.ComputingManagerEndTime = epochToDateTime(
float(m.group(1)), localtzoffset())
if "<UA_name>exit_status</UA_name>" in str:
cur_job.ComputingManagerExitCode = m.group(1)
#######################################################################################################################
def _getDateTime(dtStr):
# Example: 2010-08-04T14:01:54
year = int(dtStr[0:4])
month = int(dtStr[5:7])
day = int(dtStr[8:10])
hour = int(dtStr[11:13])
minute = int(dtStr[14:16])
second = int(dtStr[17:19])
return datetime.datetime(year=year,
month=month,
day=day,
hour=hour,
minute=minute,
second=second,
tzinfo=localtzoffset())
#######################################################################################################################
class ComputingActivityUpdateStep(computing_activity.ComputingActivityUpdateStep):
def __init__(self):
computing_activity.ComputingActivityUpdateStep.__init__(self)
self._acceptParameter(
"reporting_file", "the path to the SGE reporting file (optional)", False)
self._acceptParameter(
"qstat", "the path to the SGE qstat program (default 'qstat')", False)
self.activities = {}
def _run(self):
self.info("running")
# if a site is generating a schedd_runlog, it can be used to find jobs that are held because of dependencies
try:
reporting_file = self.params["reporting_file"]
except KeyError:
try:
reporting_file = os.path.join(
os.environ["SGE_ROOT"], "default", "common", "reporting")
except KeyError:
msg = "no reporting_file specified and the SGE_ROOT environment variable is not set"
self.error(msg)
raise StepError(msg)
watcher = LogFileWatcher(
self._logEntry, reporting_file, self.position_file)
watcher.run()
def _logEntry(self, log_file_name, line):
if line.startswith("#"):
return
toks = line.split(":")
if toks[1] == "new_job":
pass # there is a job_log for every new job, so ignore these
elif toks[1] == "job_log":
self.handleJobLog(toks)
elif toks[1] == "queue":
pass # ignore
elif toks[1] == "acct":
# accounting records have job configuration information, but are generated when the job completes
pass
else:
self.info("unknown type: %s" % toks[1])
def handleJobLog(self, toks):
# log time
# job_log
# event time ?
# type
# job id
# dunno (always 0)
# dunno (always NONE)
# dunno (r, t, T ...)
# source? (master, execution daemon, scheduler)
# dunno (sge2.ranger.tacc.utexas.edu)
# dunno (0)
# dunno (always 1024)
# time of some kind
# job name?
# user name
# group name
# queue
# department (ignore)
# charge account
# comment
if len(toks) != 20:
self.warning(
"Expected 20 tokens in log entry, but found %d. Ignoring." % len(toks))
return
if toks[8] == "execution daemon":
# these are redundant to what master logs, so ignore
return
if toks[4] in self.activities:
activity = self.activities[toks[4]]
# activity will be modified - update creation time
activity.CreationTime = datetime.datetime.now(tzoffset(0))
else:
activity = computing_activity.ComputingActivity()
event_dt = datetime.datetime.fromtimestamp(float(toks[2]), tzoffset(0))
activity.LocalIDFromManager = toks[4]
activity.Name = toks[13]
activity.LocalOwner = toks[14]
# ignore group
activity.Queue = toks[16]
activity.Extension["LocalAccount"] = toks[18]
if toks[3] == "pending":
activity.State = [
computing_activity.ComputingActivity.STATE_PENDING]
activity.SubmissionTime = event_dt
activity.ComputingManagerSubmissionTime = event_dt
self.activities[activity.LocalIDFromManager] = activity
elif toks[3] == "sent":
# sent to execd - just ignore
return
elif toks[3] == "delivered":
# job received by execd - job started
activity.State = [
computing_activity.ComputingActivity.STATE_RUNNING]
activity.StartTime = event_dt
elif toks[3] == "finished":
if activity.ComputingManagerEndTime is not None:
# could be a finished message after an error - ignore it
return
activity.State = [
computing_activity.ComputingActivity.STATE_FINISHED]
activity.ComputingManagerEndTime = event_dt
if activity.LocalIDFromManager in self.activities:
del self.activities[activity.LocalIDFromManager]
elif toks[3] == "deleted":
# scheduler deleting the job and a finished appears first, so ignore
return
elif toks[3] == "error":
activity.State = [
computing_activity.ComputingActivity.STATE_FAILED]
activity.ComputingManagerEndTime = event_dt
if activity.LocalIDFromManager in self.activities:
del self.activities[activity.LocalIDFromManager]
elif toks[3] == "restart":
# restart doesn't seem to mean that the job starts running again
# restarts occur after errors (an attempt to restart?) - just ignore them
return
#activity.State = [computing_activity.ComputingActivity.STATE_RUNNING]
#activity.StartTime = event_dt
else:
self.warning("unknown job log of type %s" % toks[3])
return
activity.State.append("sge:"+toks[3])
# these records are missing a few things, like the # nodes
if activity.RequestedSlots is None:
self.addInfo(activity)
if self._includeQueue(activity.Queue):
self.output(activity)
def addInfo(self, job):
try:
qstat = self.params["qstat"]
except KeyError:
qstat = "qstat"
cmd = qstat + " -xml -s prsz -j " + job.LocalIDFromManager
self.debug("running "+cmd)
status, output = subprocess.getstatusoutput(cmd)
if status != 0:
raise StepError("qstat failed: "+output+"\n")
parseJLines(output, {job.LocalIDFromManager: job}, self)
#######################################################################################################################
class ComputingSharesStep(computing_share.ComputingSharesStep):
def __init__(self):
computing_share.ComputingSharesStep.__init__(self)
self._acceptParameter(
"qconf", "the path to the SGE qconf program (default 'qconf')", False)
def _run(self):
try:
qconf = self.params["qconf"]
except KeyError:
qconf = "qconf"
cmd = qconf + " -sq \**"
self.debug("running "+cmd)
status, output = subprocess.getstatusoutput(cmd)
if status != 0:
self.error("qconf failed: "+output+"\n")
raise StepError("qconf failed: "+output+"\n")
queues = []
queueStrings = output.split("\n\n")
for queueString in queueStrings:
queue = self._getQueue(queueString)
if self._includeQueue(queue.Name):
queues.append(queue)
return queues
def _getQueue(self, queueString):
queue = computing_share.ComputingShare()
lines = queueString.split("\n")
queueName = None
for line in lines:
if line.startswith("qname "):
queueName = line[5:].lstrip()
break
queue.Name = queueName
queue.MappingQueue = queue.Name
for line in lines:
if line.startswith("s_rt "):
value = line[4:].lstrip()
if value != "INFINITY":
queue.MaxWallTime = self._getDuration(value)
if line.startswith("s_cpu "):
value = line[5:].lstrip()
if value != "INFINITY":
queue.MaxTotalCPUTime = self._getDuration(value)
if line.startswith("h_data "):
value = line[6:].lstrip()
if value != "INFINITY":
# MaxMainMemory is MB
try:
# if no units, it is bytes
queue.MaxMainMemory = int(value) / 1024 / 1024
except ValueError:
# may have a unit on the end
unit = value[len(value)-1:]
try:
mem = int(value[:len(value)-1])
if unit == "K":
queue.MaxMainMemory = mem / 1024
if unit == "G":
queue.MaxMainMemory = mem * 1024
except ValueError:
pass
return queue
def _getDuration(self, dStr):
(hour, minute, second) = dStr.split(":")
return int(hour)*60*60 + int(minute)*60 + int(second)
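# --- Hedged illustration (editor addition, values are hypothetical) ---
# _getQueue() above expects one "qconf -sq" style block per queue, e.g. (abridged):
#
#     qname      normal
#     s_rt       24:00:00
#     h_data     4G
#
# which yields MaxWallTime = 86400 seconds and MaxMainMemory = 4096 MB.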
#######################################################################################################################
class ExecutionEnvironmentsStep(execution_environment.ExecutionEnvironmentsStep):
def __init__(self):
execution_environment.ExecutionEnvironmentsStep.__init__(self)
self._acceptParameter(
"qhost", "the path to the SGE qhost program (default 'qhost')", False)
def _run(self):
try:
qhost = self.params["qhost"]
except KeyError:
qhost = "qhost"
cmd = qhost + " -xml -q"
self.debug("running "+cmd)
status, output = subprocess.getstatusoutput(cmd)
if status != 0:
self.error("qhost failed: "+output+"\n")
raise StepError("qhost failed: "+output+"\n")
handler = HostsHandler(self)
xml.sax.parseString(output, handler)
hosts = []
for host in handler.hosts:
if self._goodHost(host):
hosts.append(host)
return self._groupHosts(hosts)
#######################################################################################################################
class HostsHandler(xml.sax.handler.ContentHandler):
def __init__(self, step):
self.step = step
self.cur_host = None
self.hosts = []
self.cur_time = time.time()
self.hostvalue_name = None
self.text = ""
def startDocument(self):
pass
def endDocument(self):
if self.cur_host is not None and self._goodHost(self.cur_host):
self.hosts.append(self.cur_host)
def startElement(self, name, attrs):
if name == "host":
self.cur_host = execution_environment.ExecutionEnvironment()
self.cur_host.Name = attrs.getValue("name")
self.cur_host.TotalInstances = 1
elif name == "queue":
queue = attrs.getValue("name")
self.cur_host.ShareID.append(
"urn:glue2:ComputingShare:%s.%s" % (queue, self.step.resource_name))
elif name == "hostvalue":
self.hostvalue_name = attrs.getValue("name")
def endElement(self, name):
if name == "host":
if self.cur_host.PhysicalCPUs is not None:
self.hosts.append(self.cur_host)
self.cur_host = None
self.text = self.text.lstrip().rstrip()
if name == "hostvalue":
if self.hostvalue_name == "arch_string":
# SGE does some unknown crazy stuff to get their arch string. Just use the defaults.
pass
elif self.hostvalue_name == "num_proc":
if self.text != "-":
self.cur_host.PhysicalCPUs = int(self.text)
# don't have enough info for something else
self.cur_host.LogicalCPUs = self.cur_host.PhysicalCPUs
elif self.hostvalue_name == "load_avg":
if self.text == "-":
self.cur_host.UsedInstances = 0
self.cur_host.UnavailableInstances = 1
else:
# saw '1.07K'
if self.text[-1] == "K":
load = float(self.text[:-1]) * 1000
else:
load = float(self.text)
if load > float(self.cur_host.PhysicalCPUs)/2:
self.cur_host.Extension["UsedAverageLoad"] = load
self.cur_host.UsedInstances = 1
self.cur_host.UnavailableInstances = 0
else:
self.cur_host.Extension["AvailableAverageLoad"] = load
self.cur_host.UsedInstances = 0
self.cur_host.UnavailableInstances = 0
elif self.hostvalue_name == "mem_total":
if self.text != "-":
units = self.text[len(self.text)-1:] # 'M' or 'G'
memSize = float(self.text[:len(self.text)-1])
if units == "G":
self.cur_host.MainMemorySize = int(memSize * 1024)
elif units == "M":
self.cur_host.MainMemorySize = int(memSize)
else:
self.step.warning(
"couldn't handle memory units of '"+units+"'")
elif self.hostvalue_name == "mem_used":
pass
elif self.hostvalue_name == "swap_total":
pass
elif self.hostvalue_name == "swap_used":
pass
self.hostvalue_name = None
self.text = ""
def characters(self, ch):
# all of the text for an element may not come at once
self.text = self.text + ch
#######################################################################################################################
class AcceleratorEnvironmentsStep(accelerator_environment.AcceleratorEnvironmentsStep):
def __init__(self):
accelerator_environment.AcceleratorEnvironmentsStep.__init__(self)
self._acceptParameter(
"scontrol", "the path to the SLURM scontrol program (default 'scontrol')", False)
def _run(self):
# get info on the nodes
return
#######################################################################################################################
class ComputingManagerAcceleratorInfoStep(computing_manager_accel_info.ComputingManagerAcceleratorInfoStep):
def __init__(self):
computing_manager_accel_info.ComputingManagerAcceleratorInfoStep.__init__(
self)
def _run(self):
manager_accel_info = computing_manager_accel_info.ComputingManagerAcceleratorInfo()
return manager_accel_info
#######################################################################################################################
class ComputingShareAcceleratorInfoStep(computing_share_accel_info.ComputingShareAcceleratorInfoStep):
def __init__(self):
computing_share_accel_info.ComputingShareAcceleratorInfoStep.__init__(
self)
def _run(self):
share_accel_info = computing_share_accel_info.ComputingShareAcceleratorInfo()
return share_accel_info
#######################################################################################################################
|
apache-2.0
|
carlgao/lenga
|
images/lenny64-peon/usr/share/fail2ban/server/action.py
|
2
|
7669
|
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision: 682 $
__author__ = "Cyril Jaquier"
__version__ = "$Revision: 682 $"
__date__ = "$Date: 2008-04-08 00:25:16 +0200 (Tue, 08 Apr 2008) $"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import logging, os
#from subprocess import call
# Gets the instance of the logger.
logSys = logging.getLogger("fail2ban.actions.action")
##
# Execute commands.
#
# This class reads the failures from the Jail queue and decides if an
# action has to be taken. A BanManager takes care of the banned IP
# addresses.
class Action:
def __init__(self, name):
self.__name = name
self.__cInfo = dict()
## Command executed in order to initialize the system.
self.__actionStart = ''
## Command executed when an IP address gets banned.
self.__actionBan = ''
## Command executed when an IP address gets removed.
self.__actionUnban = ''
## Command executed in order to check requirements.
self.__actionCheck = ''
## Command executed in order to stop the system.
self.__actionStop = ''
logSys.debug("Created Action")
##
# Sets the action name.
#
# @param name the name of the action
def setName(self, name):
self.__name = name
##
# Returns the action name.
#
# @return the name of the action
def getName(self):
return self.__name
##
# Sets a "CInfo".
#
# CInfo are statically defined properties. They can be defined by
# the user and are used to set e-mail addresses, ports, hosts or
# anything that should not change during the life of the server.
#
# @param key the property name
# @param value the property value
def setCInfo(self, key, value):
self.__cInfo[key] = value
##
# Returns a "CInfo".
#
# @param key the property name
def getCInfo(self, key):
return self.__cInfo[key]
##
# Removes a "CInfo".
#
# @param key the property name
def delCInfo(self, key):
del self.__cInfo[key]
##
# Set the "start" command.
#
# @param value the command
def setActionStart(self, value):
self.__actionStart = value
logSys.debug("Set actionStart = %s" % value)
##
# Get the "start" command.
#
# @return the command
def getActionStart(self):
return self.__actionStart
##
# Executes the action "start" command.
#
# Replaces the tags in the action command with value of "cInfo"
# and executes the resulting command.
#
# @return True if the command succeeded
def execActionStart(self):
startCmd = Action.replaceTag(self.__actionStart, self.__cInfo)
return Action.executeCmd(startCmd)
##
# Set the "ban" command.
#
# @param value the command
def setActionBan(self, value):
self.__actionBan = value
logSys.debug("Set actionBan = %s" % value)
##
# Get the "ban" command.
#
# @return the command
def getActionBan(self):
return self.__actionBan
##
# Executes the action "ban" command.
#
# @return True if the command succeeded
def execActionBan(self, aInfo):
return self.__processCmd(self.__actionBan, aInfo)
##
# Set the "unban" command.
#
# @param value the command
def setActionUnban(self, value):
self.__actionUnban = value
logSys.debug("Set actionUnban = %s" % value)
##
# Get the "unban" command.
#
# @return the command
def getActionUnban(self):
return self.__actionUnban
##
# Executes the action "unban" command.
#
# @return True if the command succeeded
def execActionUnban(self, aInfo):
return self.__processCmd(self.__actionUnban, aInfo)
##
# Set the "check" command.
#
# @param value the command
def setActionCheck(self, value):
self.__actionCheck = value
logSys.debug("Set actionCheck = %s" % value)
##
# Get the "check" command.
#
# @return the command
def getActionCheck(self):
return self.__actionCheck
##
# Set the "stop" command.
#
# @param value the command
def setActionStop(self, value):
self.__actionStop = value
logSys.debug("Set actionStop = %s" % value)
##
# Get the "stop" command.
#
# @return the command
def getActionStop(self):
return self.__actionStop
##
# Executes the action "stop" command.
#
# Replaces the tags in the action command with value of "cInfo"
# and executes the resulting command.
#
# @return True if the command succeeded
def execActionStop(self):
stopCmd = Action.replaceTag(self.__actionStop, self.__cInfo)
return Action.executeCmd(stopCmd)
##
# Replaces tags in query with property values in aInfo.
#
# @param query the query string with tags
# @param aInfo the properties
# @return a string
#@staticmethod
def replaceTag(query, aInfo):
""" Replace tags in query
"""
string = query
for tag in aInfo:
string = string.replace('<' + tag + '>', str(aInfo[tag]))
# New line
string = string.replace("<br>", '\n')
return string
replaceTag = staticmethod(replaceTag)
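# --- Hedged illustration (editor addition, tag names and values are hypothetical) ---
#     Action.replaceTag("iptables -I fail2ban-<name> -s <ip> -j DROP",
#                       {"name": "ssh", "ip": "192.0.2.1"})
#     # -> "iptables -I fail2ban-ssh -s 192.0.2.1 -j DROP"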
##
# Executes a command with preliminary checks and substitutions.
#
# Before executing any commands, executes the "check" command first
# in order to check that prerequisites are met. If this check fails,
# it tries to restore a sane environment before executing the real
# command.
# Replaces "aInfo" and "cInfo" in the query too.
#
# @param cmd The command to execute
# @param aInfo Dynamic properties
# @return True if the command succeeded
def __processCmd(self, cmd, aInfo = None):
""" Executes an OS command.
"""
if cmd == "":
logSys.debug("Nothing to do")
return True
checkCmd = Action.replaceTag(self.__actionCheck, self.__cInfo)
if not Action.executeCmd(checkCmd):
logSys.error("Invariant check failed. Trying to restore a sane" +
" environment")
stopCmd = Action.replaceTag(self.__actionStop, self.__cInfo)
Action.executeCmd(stopCmd)
startCmd = Action.replaceTag(self.__actionStart, self.__cInfo)
Action.executeCmd(startCmd)
if not Action.executeCmd(checkCmd):
logSys.fatal("Unable to restore environment")
return False
# Replace tags
if aInfo is not None:
realCmd = Action.replaceTag(cmd, aInfo)
else:
realCmd = cmd
# Replace static fields
realCmd = Action.replaceTag(realCmd, self.__cInfo)
return Action.executeCmd(realCmd)
##
# Executes a command.
#
# We need a shell here because commands are mainly shell scripts. They
# contain pipes, redirections, etc.
#
# @todo Force the use of bash!?
# @todo Kill the command after a given timeout
#
# @param realCmd the command to execute
# @return True if the command succeeded
#@staticmethod
def executeCmd(realCmd):
logSys.debug(realCmd)
try:
# The following line gives deadlock with multiple jails
#retcode = call(realCmd, shell=True)
retcode = os.system(realCmd)
if retcode == 0:
logSys.debug("%s returned successfully" % realCmd)
return True
else:
logSys.error("%s returned %x" % (realCmd, retcode))
except OSError, e:
logSys.error("%s failed with %s" % (realCmd, e))
return False
executeCmd = staticmethod(executeCmd)
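# --- Hedged usage sketch (editor addition, commands and values are hypothetical) ---
#     action = Action("ssh-iptables")
#     action.setCInfo("name", "ssh")
#     action.setActionBan("iptables -I fail2ban-<name> -s <ip> -j DROP")
#     action.execActionBan({"ip": "192.0.2.1"})   # aInfo tags, then cInfo tags, are substituted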
|
mit
|
roshan/thrift
|
tutorial/py.tornado/PythonServer.py
|
2
|
2986
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import glob
sys.path.append('gen-py.tornado')
sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])
from tutorial import Calculator
from tutorial.ttypes import Operation, InvalidOperation
from shared.ttypes import SharedStruct
from thrift import TTornado
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from tornado import ioloop
class CalculatorHandler(object):
def __init__(self):
self.log = {}
def ping(self, callback):
print("ping()")
callback()
def add(self, n1, n2, callback):
print("add({}, {})".format(n1, n2))
callback(n1 + n2)
def calculate(self, logid, work, callback):
print("calculate({}, {})".format(logid, work))
if work.op == Operation.ADD:
val = work.num1 + work.num2
elif work.op == Operation.SUBTRACT:
val = work.num1 - work.num2
elif work.op == Operation.MULTIPLY:
val = work.num1 * work.num2
elif work.op == Operation.DIVIDE:
if work.num2 == 0:
x = InvalidOperation()
x.whatOp = work.op
x.why = "Cannot divide by 0"
raise x
val = work.num1 / work.num2
else:
x = InvalidOperation()
x.whatOp = work.op
x.why = "Invalid operation"
raise x
log = SharedStruct()
log.key = logid
log.value = '%d' % (val)
self.log[logid] = log
callback(val)
def getStruct(self, key, callback):
print("getStruct({})".format(key))
callback(self.log[key])
def zip(self, callback):
print("zip()")
callback()
def main():
handler = CalculatorHandler()
processor = Calculator.Processor(handler)
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TTornado.TTornadoServer(processor, pfactory)
print("Starting the server...")
server.bind(9090)
server.start(1)
ioloop.IOLoop.instance().start()
print("done.")
if __name__ == "__main__":
main()
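# --- Hedged usage note (editor addition) ---
# Running this module starts a single TTornadoServer process listening on port
# 9090; a Thrift client generated from the same tutorial IDL (for example the
# tutorial's Tornado Python client) can then call ping/add/calculate against it.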
|
apache-2.0
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/openmdao.util-0.13.0-py2.7.egg/openmdao/util/publickey.py
|
1
|
18058
|
"""
Support for generation, use, and storage of public/private key pairs.
The :func:`pk_encrypt`, :func:`pk_decrypt`, :func:`pk_sign`, and
:func:`pk_verify` functions provide a thin interface over
:class:`Crypto.PublicKey.RSA` methods for easier use and to work around some
issues found with some keys read from ssh ``id_rsa`` files.
"""
import base64
import cPickle
import getpass
import logging
import os.path
import socket
import sys
import threading
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Util.number import bytes_to_long
if sys.platform == 'win32': #pragma no cover
try:
import win32api
import win32con
import win32security
import ntsecuritycon
except ImportError:
HAVE_PYWIN32 = False
else:
HAVE_PYWIN32 = True
else:
HAVE_PYWIN32 = False
from openmdao.util.log import NullLogger
# Cache of client key pairs indexed by user.
_KEY_CACHE = {}
_KEY_CACHE_LOCK = threading.Lock()
def get_key_pair(user_host, logger=None,
overwrite_cache=False, ignore_ssh=False):
"""
Returns RSA key containing both public and private keys for the user
identified in `user_host`. This can be an expensive operation, so
we avoid generating a new key pair whenever possible.
If ``~/.ssh/id_rsa`` exists and is private, that key is returned.
user_host: string
Format ``user@host``.
logger: :class:`logging.Logger`
Used for debug messages.
overwrite_cache: bool
If True, a new key is generated and forced into the cache of existing
known keys. Used for testing.
ignore_ssh: bool
If True, ignore any existing ssh id_rsa key file. Used for testing.
.. note::
To avoid unnecessary key generation, the public/private key pair for
the current user is stored in the private file ``~/.openmdao/keys``.
On Windows this requires the pywin32 extension. Also, the public
key is stored in ssh form in ``~/.openmdao/id_rsa.pub``.
"""
logger = logger or NullLogger()
with _KEY_CACHE_LOCK:
if overwrite_cache:
key_pair = _generate(user_host, logger)
_KEY_CACHE[user_host] = key_pair
return key_pair
# Look in previously generated keys.
try:
key_pair = _KEY_CACHE[user_host]
except KeyError:
# If key for current user (typical), check filesystem.
# TODO: file lock to protect from separate processes.
user, host = user_host.split('@')
if user == getpass.getuser():
current_user = True
key_pair = None
# Try to re-use SSH key. Exceptions should *never* be exercised!
if not ignore_ssh:
id_rsa = \
os.path.expanduser(os.path.join('~', '.ssh', 'id_rsa'))
if is_private(id_rsa):
try:
with open(id_rsa, 'r') as inp:
key_pair = RSA.importKey(inp.read())
except Exception as exc: #pragma no cover
logger.warning('ssh id_rsa import: %r', exc)
else:
generate = False
else: #pragma no cover
logger.warning('Ignoring insecure ssh id_rsa.')
if key_pair is None:
# Look for OpenMDAO key.
key_file = \
os.path.expanduser(os.path.join('~', '.openmdao', 'keys'))
if is_private(key_file):
try:
with open(key_file, 'rb') as inp:
key_pair = cPickle.load(inp)
except Exception:
generate = True
else:
generate = False
else:
logger.warning('Insecure keyfile! Regenerating keys.')
os.remove(key_file)
generate = True
# Difficult to run test as non-current user.
else: #pragma no cover
current_user = False
generate = True
if generate:
key_pair = _generate(user_host, logger)
if current_user:
key_dir = os.path.dirname(key_file)
if not os.path.exists(key_dir):
os.mkdir(key_dir)
# Save key pair in protected file.
if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
logger.debug('No pywin32, not saving keyfile')
else:
make_private(key_dir) # Private while writing keyfile.
with open(key_file, 'wb') as out:
cPickle.dump(key_pair, out,
cPickle.HIGHEST_PROTOCOL)
try:
make_private(key_file)
# Hard to cause (recoverable) error here.
except Exception: #pragma no cover
os.remove(key_file) # Remove unsecured file.
raise
# Save public key in ssh form.
users = {user_host: key_pair.publickey()}
filename = os.path.join(key_dir, 'id_rsa.pub')
write_authorized_keys(users, filename, logger)
_KEY_CACHE[user_host] = key_pair
return key_pair
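# --- Hedged usage sketch (editor addition) ---
#     user_host = '%s@%s' % (getpass.getuser(), socket.gethostname())
#     key_pair = get_key_pair(user_host)        # cached after the first call
#     public_key = key_pair.publickey()         # share only this part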
def _generate(user_host, logger):
""" Return new key. """
logger.debug('generating public key for %r...', user_host)
if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
strength = 1024 # Much quicker to generate.
else:
strength = 2048
key_pair = RSA.generate(strength, get_random_bytes)
logger.debug(' done')
return key_pair
def pk_encrypt(data, public_key):
"""
Return list of chunks of `data` encrypted by `public_key`.
data: string
The message to be encrypted.
public_key: :class:`Crypto.PublicKey.RSA`
Public portion of key pair.
"""
# Normally we would use 8 rather than 16 here, but for some reason at least
# some keys read from ssh id_rsa files don't work correctly with 8.
chunk_size = public_key.size() / 16
chunks = []
while data:
chunks.append(public_key.encrypt(data[:chunk_size], ''))
data = data[chunk_size:]
return chunks
def pk_decrypt(encrypted, private_key):
"""
Return `encrypted` decrypted by `private_key` as a string.
encrypted: list
Chunks of encrypted data returned by :func:`pk_encrypt`.
private_key: :class:`Crypto.PublicKey.RSA`
Private portion of key pair.
"""
data = ''
for chunk in encrypted:
data += private_key.decrypt(chunk)
return data
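# --- Hedged usage sketch (editor addition) ---
#     key = RSA.generate(2048, get_random_bytes)       # as in _generate() above
#     chunks = pk_encrypt('hello', key.publickey())
#     assert pk_decrypt(chunks, key) == 'hello'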
def pk_sign(hashed, private_key):
"""
Return signature for `hashed` using `private_key`.
hashed: string
A hash value of the data to be signed.
private_key: :class:`Crypto.PublicKey.RSA`
Private portion of key pair.
"""
# Normally we would just do:
# return private_key.sign(hashed, '')
# But that fails for at least some keys from ssh id_rsa files.
# Instead, use the 'slowmath' method:
c = bytes_to_long(hashed)
m = pow(c, private_key.d, private_key.n)
return (m,)
def pk_verify(hashed, signature, public_key):
"""
Verify `hashed` based on `signature` and `public_key`.
hashed: string
A hash for the data that is signed.
signature: tuple
Value returned by :func:`pk_sign`.
public_key: :class:`Crypto.PublicKey.RSA`
Public portion of key pair.
"""
return public_key.verify(hashed, signature)
def is_private(path):
"""
Return True if `path` is accessible only by 'owner'.
path: string
Path to file or directory to check.
.. note::
On Windows this requires the pywin32 extension.
"""
if not os.path.exists(path):
return True # Nonexistent file is secure ;-)
if sys.platform == 'win32': #pragma no cover
if not HAVE_PYWIN32:
return False # No way to know.
# Find the SIDs for user and system.
username = win32api.GetUserNameEx(win32con.NameSamCompatible)
# Map Cygwin 'root' to 'Administrator'. Typically these are intended
# to be identical, but /etc/passwd might configure them differently.
if username.endswith('\\root'):
username = username.replace('\\root', '\\Administrator')
user, domain, type = win32security.LookupAccountName('', username)
system, domain, type = win32security.LookupAccountName('', 'System')
# Find the DACL part of the Security Descriptor for the file
sd = win32security.GetFileSecurity(path,
win32security.DACL_SECURITY_INFORMATION)
dacl = sd.GetSecurityDescriptorDacl()
if dacl is None:
logging.warning('is_private: No DACL for %r', path)
return False # Happened on a user's XP system.
# Verify the DACL contains just the two entries we expect.
count = dacl.GetAceCount()
if count != 2:
return False
for i in range(count):
ace = dacl.GetAce(i)
if ace[2] != user and ace[2] != system:
return False
return True
else:
return (os.stat(path).st_mode & 0077) == 0
def make_private(path):
"""
Make `path` accessible only by 'owner'.
path: string
Path to file or directory to be made private.
.. note::
On Windows this requires the pywin32 extension.
"""
if sys.platform == 'win32': #pragma no cover
if not HAVE_PYWIN32:
raise ImportError('No pywin32')
# Find the SIDs for user and system.
username = win32api.GetUserNameEx(win32con.NameSamCompatible)
# Map Cygwin 'root' to 'Administrator'. Typically these are intended
# to be identical, but /etc/passwd might configure them differently.
if username.endswith('\\root'):
username = username.replace('\\root', '\\Administrator')
user, domain, type = win32security.LookupAccountName('', username)
system, domain, type = win32security.LookupAccountName('', 'System')
# Find the DACL part of the Security Descriptor for the file
sd = win32security.GetFileSecurity(path,
win32security.DACL_SECURITY_INFORMATION)
# Create a blank DACL and add the ACEs we want.
dacl = win32security.ACL()
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,
ntsecuritycon.FILE_ALL_ACCESS, user)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION,
ntsecuritycon.FILE_ALL_ACCESS, system)
# Put our new DACL into the Security Descriptor and update the file
# with the updated SD.
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(path,
win32security.DACL_SECURITY_INFORMATION,
sd)
else:
# Normal chmod() works on test machines with ACLs enabled, but a user
# in the field reported a situation where it didn't. This code tries
# using libacl if it can. Doesn't seem to cause any problems, not
# verified that it helps, though.
try:
# From pylibacl, which requires 'libacl1-dev'.
import posix1e
except ImportError:
mode = 0700 if os.path.isdir(path) else 0600
os.chmod(path, mode) # Read/Write/Execute
else:
if os.path.isdir(path):
acl = posix1e.ACL(text='u::rwx,g::-,o::-')
else:
acl = posix1e.ACL(text='u::rw,g::-,o::-')
acl.applyto(path)
if not is_private(path):
raise RuntimeError("Can't make %r private" % path)
def encode_public_key(key):
"""
Return base64 text representation of public key `key`.
key: public key
Public part of key pair.
"""
# Just being defensive, this should never happen.
if key.has_private(): #pragma no cover
key = key.publickey()
return base64.b64encode(cPickle.dumps(key, cPickle.HIGHEST_PROTOCOL))
def decode_public_key(text):
"""
Return public key from text representation.
text: string
base64 encoded key data.
"""
return cPickle.loads(base64.b64decode(text))
def read_authorized_keys(filename=None, logger=None):
"""
Return dictionary of public keys, indexed by user, read from `filename`.
The file must be in ssh format, and only RSA keys are processed.
If the file is not private, then no keys are returned.
filename: string
File to read from. The default is ``~/.ssh/authorized_keys``.
logger: :class:`logging.Logger`
Used for log messages.
"""
if not filename:
filename = \
os.path.expanduser(os.path.join('~', '.ssh', 'authorized_keys'))
logger = logger or NullLogger()
if not os.path.exists(filename):
raise RuntimeError('%r does not exist' % filename)
if not is_private(filename):
if sys.platform != 'win32' or HAVE_PYWIN32:
raise RuntimeError('%r is not private' % filename)
else: #pragma no cover
logger.warning('Allowed users file %r is not private', filename)
errors = 0
keys = {}
with open(filename, 'r') as inp:
for line in inp:
line = line.rstrip()
sharp = line.find('#')
if sharp >= 0:
line = line[:sharp]
if not line:
continue
key_type, blank, rest = line.partition(' ')
if key_type != 'ssh-rsa':
logger.error('unsupported key type: %r', key_type)
errors += 1
continue
key_data, blank, user_host = rest.partition(' ')
if not key_data:
logger.error('bad line (missing key data):')
logger.error(line)
errors += 1
continue
try:
user, host = user_host.split('@')
except ValueError:
logger.error('bad line (require user@host):')
logger.error(line)
errors += 1
continue
logger.debug('user %r, host %r', user, host)
try:
ip_addr = socket.gethostbyname(host)
except socket.gaierror:
logger.warning('unknown host %r', host)
logger.warning(line)
data = base64.b64decode(key_data)
start = 0
name_len = _longint(data, start, 4)
start += 4
name = data[start:start+name_len]
if name != 'ssh-rsa':
logger.error('name error: %r vs. ssh-rsa', name)
logger.error(line)
errors += 1
continue
start += name_len
e_len = _longint(data, start, 4)
start += 4
e = _longint(data, start, e_len)
start += e_len
n_len = _longint(data, start, 4)
start += 4
n = _longint(data, start, n_len)
start += n_len
if start != len(data):
logger.error('length error: %d vs. %d', start, len(data))
logger.error(line)
errors += 1
continue
try:
pubkey = RSA.construct((n, e))
except Exception as exc:
logger.error('key construct error: %r', exc)
errors += 1
else:
keys[user_host] = pubkey
if errors:
raise RuntimeError('%d errors in %r, check log for details'
% (errors, filename))
return keys
def _longint(buf, start, length):
""" Return long value from binary string. """
value = long(0)
for i in range(length):
value = (value << 8) + ord(buf[start])
start += 1
return value
def write_authorized_keys(allowed_users, filename, logger=None):
"""
Write `allowed_users` to `filename` in ssh format.
The file will be made private if supported on this platform.
allowed_users: dict
Dictionary of public keys indexed by user.
filename: string
File to write to.
logger: :class:`logging.Logger`
Used for log messages.
"""
logger = logger or NullLogger()
with open(filename, 'w') as out:
for user in sorted(allowed_users.keys()):
pubkey = allowed_users[user]
buf = 'ssh-rsa'
key_data = _longstr(len(buf), 4)
key_data += buf
buf = _longstr(pubkey.e)
key_data += _longstr(len(buf), 4)
key_data += buf
buf = _longstr(pubkey.n)
key_data += _longstr(len(buf), 4)
key_data += buf
data = base64.b64encode(key_data)
out.write('ssh-rsa %s %s\n\n' % (data, user))
if sys.platform == 'win32' and not HAVE_PYWIN32: #pragma no cover
logger.warning("Can't make authorized keys file %r private", filename)
else:
make_private(filename)
def _longstr(num, length=0):
""" Return binary string representation of `num`. """
buf = chr(num & 0xff)
num >>= 8
while num:
buf = chr(num & 0xff) + buf
num >>= 8
while len(buf) < length:
buf = chr(0) + buf
return buf
|
gpl-2.0
|
lseyesl/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
|
119
|
9007
|
# Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python module for interacting with an SCM system (like SVN or Git)
import logging
import re
import sys
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem import FileSystem
_log = logging.getLogger(__name__)
class CheckoutNeedsUpdate(ScriptError):
def __init__(self, script_args, exit_code, output, cwd):
ScriptError.__init__(self, script_args=script_args, exit_code=exit_code, output=output, cwd=cwd)
# FIXME: Should be moved onto SCM
def commit_error_handler(error):
if re.search("resource out of date", error.output):
raise CheckoutNeedsUpdate(script_args=error.script_args, exit_code=error.exit_code, output=error.output, cwd=error.cwd)
Executive.default_error_handler(error)
class AuthenticationError(Exception):
def __init__(self, server_host, prompt_for_password=False):
self.server_host = server_host
self.prompt_for_password = prompt_for_password
# SCM methods are expected to return paths relative to self.checkout_root.
class SCM:
def __init__(self, cwd, executive=None, filesystem=None):
self.cwd = cwd
self._executive = executive or Executive()
self._filesystem = filesystem or FileSystem()
self.checkout_root = self.find_checkout_root(self.cwd)
# A wrapper used by subclasses to create processes.
def run(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True, decode_output=True):
# FIXME: We should set cwd appropriately.
return self._executive.run_command(args,
cwd=cwd,
input=input,
error_handler=error_handler,
return_exit_code=return_exit_code,
return_stderr=return_stderr,
decode_output=decode_output)
# SCM always returns repository relative path, but sometimes we need
# absolute paths to pass to rm, etc.
def absolute_path(self, repository_relative_path):
return self._filesystem.join(self.checkout_root, repository_relative_path)
# FIXME: This belongs in Checkout, not SCM.
def scripts_directory(self):
return self._filesystem.join(self.checkout_root, "Tools", "Scripts")
# FIXME: This belongs in Checkout, not SCM.
def script_path(self, script_name):
return self._filesystem.join(self.scripts_directory(), script_name)
def run_status_and_extract_filenames(self, status_command, status_regexp):
filenames = []
# We run with cwd=self.checkout_root so that returned-paths are root-relative.
for line in self.run(status_command, cwd=self.checkout_root).splitlines():
match = re.search(status_regexp, line)
if not match:
continue
# status = match.group('status')
filename = match.group('filename')
filenames.append(filename)
return filenames
def strip_r_from_svn_revision(self, svn_revision):
match = re.match("^r(?P<svn_revision>\d+)", unicode(svn_revision))
if (match):
return match.group('svn_revision')
return svn_revision
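# --- Hedged illustration (editor addition) ---
#     scm.strip_r_from_svn_revision('r12345')  # -> '12345'
#     scm.strip_r_from_svn_revision(12345)     # -> 12345 (no 'r' prefix, returned unchanged)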
def svn_revision_from_commit_text(self, commit_text):
match = re.search(self.commit_success_regexp(), commit_text, re.MULTILINE)
return match.group('svn_revision')
@staticmethod
def _subclass_must_implement():
raise NotImplementedError("subclasses must implement")
@classmethod
def in_working_directory(cls, path, executive=None):
SCM._subclass_must_implement()
def find_checkout_root(self, path):
SCM._subclass_must_implement()
@staticmethod
def commit_success_regexp():
SCM._subclass_must_implement()
def status_command(self):
self._subclass_must_implement()
def add(self, path):
self.add_list([path])
def add_list(self, paths):
self._subclass_must_implement()
def delete(self, path):
self.delete_list([path])
def delete_list(self, paths):
self._subclass_must_implement()
def exists(self, path):
self._subclass_must_implement()
def changed_files(self, git_commit=None):
self._subclass_must_implement()
def changed_files_for_revision(self, revision):
self._subclass_must_implement()
def revisions_changing_file(self, path, limit=5):
self._subclass_must_implement()
def added_files(self):
self._subclass_must_implement()
def conflicted_files(self):
self._subclass_must_implement()
def display_name(self):
self._subclass_must_implement()
def head_svn_revision(self):
return self.svn_revision(self.checkout_root)
def svn_revision(self, path):
"""Returns the latest svn revision found in the checkout."""
self._subclass_must_implement()
def timestamp_of_revision(self, path, revision):
self._subclass_must_implement()
def create_patch(self, git_commit=None, changed_files=None):
self._subclass_must_implement()
def committer_email_for_revision(self, revision):
self._subclass_must_implement()
def contents_at_revision(self, path, revision):
self._subclass_must_implement()
def diff_for_revision(self, revision):
self._subclass_must_implement()
def diff_for_file(self, path, log=None):
self._subclass_must_implement()
def show_head(self, path):
self._subclass_must_implement()
def apply_reverse_diff(self, revision):
self._subclass_must_implement()
def revert_files(self, file_paths):
self._subclass_must_implement()
def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
self._subclass_must_implement()
def svn_commit_log(self, svn_revision):
self._subclass_must_implement()
def last_svn_commit_log(self):
self._subclass_must_implement()
def svn_blame(self, path):
self._subclass_must_implement()
def has_working_directory_changes(self):
self._subclass_must_implement()
def discard_working_directory_changes(self):
self._subclass_must_implement()
#--------------------------------------------------------------------------
# Subclasses must indicate if they support local commits,
# but the SCM baseclass will only call local_commits methods when this is true.
@staticmethod
def supports_local_commits():
SCM._subclass_must_implement()
def local_commits(self):
return []
def has_local_commits(self):
return len(self.local_commits()) > 0
def discard_local_commits(self):
return
def remote_merge_base(self):
SCM._subclass_must_implement()
def commit_locally_with_message(self, message):
_log.error("Your source control manager does not support local commits.")
sys.exit(1)
def local_changes_exist(self):
return (self.supports_local_commits() and self.has_local_commits()) or self.has_working_directory_changes()
def discard_local_changes(self):
if self.has_working_directory_changes():
self.discard_working_directory_changes()
if self.has_local_commits():
self.discard_local_commits()
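# --- Hedged sketch (editor addition, not part of the original module) ---
# run_status_and_extract_filenames() above only requires that status_regexp
# expose a named 'filename' group (a 'status' group is optional). A concrete
# subclass would wire it up roughly like the hypothetical example below; the
# command and pattern are illustrative, not the real SVN/Git implementations.
#
#   class ExampleSCM(SCM):
#       def status_command(self):
#           return ['svn', 'status']
#
#       def changed_files(self, git_commit=None):
#           status_regexp = r"^(?P<status>[ACDMR]).{6}(?P<filename>.+)$"
#           return self.run_status_and_extract_filenames(
#               self.status_command(), status_regexp)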
|
bsd-3-clause
|
tkzeng/molecular-design-toolkit
|
moldesign/molecules/bonds.py
|
1
|
3249
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import toplevel
@toplevel
class Bond(object):
"""
A bond between two atoms.
Args:
a1 (Atom): First atom
a2 (Atom): Second atom (the order of atoms doesn't matter)
order (int): Order of the bond
Notes:
        Two bonds compare equal (and hash identically) when they connect the same two atoms;
        bond orders are not taken into account.
These objects are used to represent and pass bond data only - they are not used for storage.
Attributes:
a1 (Atom): First atom in the bond; assigned so that ``self.a1.index < self.a2.index``
a2 (Atom): Second atom in the bond; assigned so that ``self.a2.index > self.a1.index``
order (int): bond order (can be ``None``); not used in comparisons
"""
def __init__(self, a1, a2, order=None):
if a1.index > a2.index:
a1, a2 = a2, a1
self.a1 = a1
self.a2 = a2
if order is None:
try: self.order = self.a1.bond_graph[a2]
except KeyError: self.order = None
else:
self.order = order
def __eq__(self, other):
return (self.a1 is other.a1) and (self.a2 is other.a2)
    def __hash__(self):
        """Hash this object using the atoms involved in its bond"""
        return hash((self.a1, self.a2))
def partner(self, atom):
""" Return this atom's *partner* in the bond -- i.e., the other atom in the bond
Args:
atom (mdt.Atom): return the atom that this one is bonded to
Returns:
mdt.Atom: the passed atom's partner
Raises:
ValueError: if the passed atom is not part of this bond
"""
if atom is self.a1:
return self.a2
elif atom is self.a2:
return self.a1
else:
raise ValueError('%s is not part of this bond' % atom)
@property
def name(self):
""" str: name of the bond """
return '{a1.name} (#{a1.index}) - {a2.name} (#{a2.index}) (order: {order})'.format(
a1=self.a1, a2=self.a2, order=self.order)
@property
def ff(self):
"""mdt.forcefield.BondTerm: the force-field term for this bond (or ``None`` if no
forcefield is present)
"""
try: ff = self.a1.molecule.energy_model.get_forcefield()
except (NotImplementedError, AttributeError): return None
return ff.bond_term[self]
    def __repr__(self):
        try:
            return '<Bond: %s>' % str(self)
        except Exception:
            # __repr__ must return a string, so fall back to an id-based form
            return '<Bond @ %s (error in __repr__)>' % id(self)
def __str__(self):
return self.name
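# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Bond only needs each atom to expose ``index``, ``name`` and a ``bond_graph``
# mapping, so a tiny stand-in class is enough to demonstrate atom ordering,
# order lookup and partner(). ``_FakeAtom`` is hypothetical scaffolding.
if __name__ == '__main__':
    class _FakeAtom(object):
        def __init__(self, name, index):
            self.name = name
            self.index = index
            self.bond_graph = {}
    c1, c2 = _FakeAtom('C1', 0), _FakeAtom('C2', 1)
    c1.bond_graph[c2] = 1
    c2.bond_graph[c1] = 1
    bond = Bond(c2, c1)           # atoms are swapped so that a1.index < a2.index
    assert bond.a1 is c1 and bond.a2 is c2
    assert bond.order == 1        # looked up from a1.bond_graph
    assert bond.partner(c1) is c2
    print(bond.name)              # "C1 (#0) - C2 (#1) (order: 1)"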
|
apache-2.0
|
CCI-MOC/nova
|
nova/tests/unit/compute/test_rpcapi.py
|
5
|
21500
|
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.compute.rpcapi
"""
import contextlib
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.objects import block_device as objects_block_dev
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
CONF = cfg.CONF
class ComputeRpcAPITestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context)
self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj)
instance_attr = {'host': 'fake_host',
'instance_type_id': self.fake_flavor_obj['id'],
'instance_type': self.fake_flavor_obj}
self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
**instance_attr)
self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
self.fake_volume_bdm = objects_block_dev.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'volume', 'destination_type': 'volume',
'instance_uuid': self.fake_instance_obj.uuid,
'volume_id': 'fake-volume-id'}))
def _test_compute_api(self, method, rpc_method,
expected_args=None, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)
orig_prepare = rpcapi.client.prepare
base_version = rpcapi.client.target.version
expected_version = kwargs.pop('version', base_version)
expected_kwargs = kwargs.copy()
if expected_args:
expected_kwargs.update(expected_args)
if 'host_param' in expected_kwargs:
expected_kwargs['host'] = expected_kwargs.pop('host_param')
else:
expected_kwargs.pop('host', None)
cast_and_call = ['confirm_resize', 'stop_instance']
if rpc_method == 'call' and method in cast_and_call:
if method == 'confirm_resize':
kwargs['cast'] = False
else:
kwargs['do_cast'] = False
if 'host' in kwargs:
host = kwargs['host']
elif 'instances' in kwargs:
host = kwargs['instances'][0]['host']
else:
host = kwargs['instance']['host']
if method == 'rebuild_instance' and 'node' in expected_kwargs:
expected_kwargs['scheduled_node'] = expected_kwargs.pop('node')
with contextlib.nested(
mock.patch.object(rpcapi.client, rpc_method),
mock.patch.object(rpcapi.client, 'prepare'),
mock.patch.object(rpcapi.client, 'can_send_version'),
) as (
rpc_mock, prepare_mock, csv_mock
):
prepare_mock.return_value = rpcapi.client
if '_return_value' in kwargs:
rpc_mock.return_value = kwargs.pop('_return_value')
del expected_kwargs['_return_value']
elif 'return_bdm_object' in kwargs:
del kwargs['return_bdm_object']
rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
elif rpc_method == 'call':
rpc_mock.return_value = 'foo'
else:
rpc_mock.return_value = None
csv_mock.side_effect = (
lambda v: orig_prepare(version=v).can_send_version())
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, rpc_mock.return_value)
prepare_mock.assert_called_once_with(version=expected_version,
server=host)
rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)
def test_add_aggregate_host(self):
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
instance=self.fake_instance_obj, network_id='id',
version='4.0')
def test_attach_interface(self):
self._test_compute_api('attach_interface', 'call',
instance=self.fake_instance_obj, network_id='id',
port_id='id2', version='4.0', requested_ip='192.168.1.50')
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance_obj, bdm=self.fake_volume_bdm,
version='4.0')
def test_change_instance_metadata(self):
self._test_compute_api('change_instance_metadata', 'cast',
instance=self.fake_instance_obj, diff={}, version='4.0')
def test_check_instance_shared_storage(self):
self._test_compute_api('check_instance_shared_storage', 'call',
instance=self.fake_instance_obj, data='foo',
version='4.0')
def test_confirm_resize_cast(self):
self._test_compute_api('confirm_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
host='host', reservations=list('fake_res'))
def test_confirm_resize_call(self):
self._test_compute_api('confirm_resize', 'call',
instance=self.fake_instance_obj, migration={'id': 'foo'},
host='host', reservations=list('fake_res'))
def test_detach_interface(self):
self._test_compute_api('detach_interface', 'cast',
version='4.0', instance=self.fake_instance_obj,
port_id='fake_id')
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
version='4.0')
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
image='image', disk_info='disk_info', host='host',
reservations=list('fake_res'))
def test_finish_revert_resize(self):
self._test_compute_api('finish_revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host', reservations=list('fake_res'))
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
instance=self.fake_instance_obj, tail_length='tl',
version='4.0')
def test_get_console_pool_info(self):
self._test_compute_api('get_console_pool_info', 'call',
console_type='type', host='host')
def test_get_console_topic(self):
self._test_compute_api('get_console_topic', 'call', host='host')
def test_get_diagnostics(self):
self._test_compute_api('get_diagnostics', 'call',
instance=self.fake_instance_obj, version='4.0')
def test_get_instance_diagnostics(self):
expected_args = {'instance': self.fake_instance}
self._test_compute_api('get_instance_diagnostics', 'call',
expected_args, instance=self.fake_instance_obj,
version='4.0')
def test_get_vnc_console(self):
self._test_compute_api('get_vnc_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='4.0')
def test_get_spice_console(self):
self._test_compute_api('get_spice_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='4.0')
def test_get_rdp_console(self):
self._test_compute_api('get_rdp_console', 'call',
instance=self.fake_instance_obj, console_type='type',
version='4.0')
def test_get_serial_console(self):
self._test_compute_api('get_serial_console', 'call',
instance=self.fake_instance_obj, console_type='serial',
version='4.0')
def test_get_mks_console(self):
self._test_compute_api('get_mks_console', 'call',
instance=self.fake_instance_obj, console_type='webmks',
version='4.3')
def test_validate_console_port(self):
self._test_compute_api('validate_console_port', 'call',
instance=self.fake_instance_obj, port="5900",
console_type="novnc", version='4.0')
def test_host_maintenance_mode(self):
self._test_compute_api('host_maintenance_mode', 'call',
host_param='param', mode='mode', host='host')
def test_host_power_action(self):
self._test_compute_api('host_power_action', 'call', action='action',
host='host')
def test_inject_network_info(self):
self._test_compute_api('inject_network_info', 'cast',
instance=self.fake_instance_obj)
def test_live_migration(self):
self._test_compute_api('live_migration', 'cast',
instance=self.fake_instance_obj, dest='dest',
block_migration='blockity_block', host='tsoh',
migration='migration',
migrate_data={}, version='4.2')
def test_post_live_migration_at_destination(self):
self._test_compute_api('post_live_migration_at_destination', 'cast',
instance=self.fake_instance_obj,
block_migration='block_migration', host='host', version='4.0')
def test_pause_instance(self):
self._test_compute_api('pause_instance', 'cast',
instance=self.fake_instance_obj)
def test_soft_delete_instance(self):
self._test_compute_api('soft_delete_instance', 'cast',
instance=self.fake_instance_obj,
reservations=['uuid1', 'uuid2'])
def test_swap_volume(self):
self._test_compute_api('swap_volume', 'cast',
instance=self.fake_instance_obj, old_volume_id='oldid',
new_volume_id='newid')
def test_restore_instance(self):
self._test_compute_api('restore_instance', 'cast',
instance=self.fake_instance_obj, version='4.0')
def test_pre_live_migration(self):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance_obj,
block_migration='block_migration', disk='disk', host='host',
migrate_data=None, version='4.0')
def test_prep_resize(self):
self._test_compute_api('prep_resize', 'cast',
instance=self.fake_instance_obj,
instance_type=self.fake_flavor_obj,
image='fake_image', host='host',
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
node='node', clean_shutdown=True, version='4.1')
self.flags(compute='4.0', group='upgrade_levels')
expected_args = {'instance_type': self.fake_flavor}
self._test_compute_api('prep_resize', 'cast', expected_args,
instance=self.fake_instance_obj,
instance_type=self.fake_flavor_obj,
image='fake_image', host='host',
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
node='node', clean_shutdown=True, version='4.0')
def test_reboot_instance(self):
self.maxDiff = None
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance_obj,
block_device_info={},
reboot_type='type')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
injected_files='None', image_ref='None', orig_image_ref='None',
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
limits=None, version='4.5')
def test_rebuild_instance_downgrade(self):
self.flags(group='upgrade_levels', compute='4.0')
self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
injected_files='None', image_ref='None', orig_image_ref='None',
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, version='4.0')
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
instance=self.fake_instance_obj, device='device',
volume_id='id', disk_bus='ide', device_type='cdrom',
version='4.0',
_return_value=objects_block_dev.BlockDeviceMapping())
    def test_refresh_provider_fw_rules(self):
self._test_compute_api('refresh_provider_fw_rules', 'cast',
host='host')
def test_refresh_security_group_rules(self):
self._test_compute_api('refresh_security_group_rules', 'cast',
security_group_id='id', host='host', version='4.0')
def test_refresh_security_group_members(self):
self._test_compute_api('refresh_security_group_members', 'cast',
security_group_id='id', host='host', version='4.0')
def test_refresh_instance_security_rules(self):
expected_args = {'instance': self.fake_instance_obj}
self._test_compute_api('refresh_instance_security_rules', 'cast',
expected_args, host='fake_host',
instance=self.fake_instance_obj, version='4.4')
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
instance=self.fake_instance_obj, address='addr',
version='4.0')
def test_remove_volume_connection(self):
self._test_compute_api('remove_volume_connection', 'call',
instance=self.fake_instance_obj, volume_id='id', host='host',
version='4.0')
def test_rescue_instance(self):
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance_obj, rescue_password='pw',
rescue_image_ref='fake_image_ref',
clean_shutdown=True, version='4.0')
def test_reset_network(self):
self._test_compute_api('reset_network', 'cast',
instance=self.fake_instance_obj)
def test_resize_instance(self):
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
image='image', instance_type=self.fake_flavor_obj,
reservations=list('fake_res'),
clean_shutdown=True, version='4.1')
self.flags(compute='4.0', group='upgrade_levels')
expected_args = {'instance_type': self.fake_flavor}
self._test_compute_api('resize_instance', 'cast', expected_args,
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
image='image', instance_type=self.fake_flavor_obj,
reservations=list('fake_res'),
clean_shutdown=True, version='4.0')
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
instance=self.fake_instance_obj)
def test_revert_resize(self):
self._test_compute_api('revert_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'fake_id'},
host='host', reservations=list('fake_res'))
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',
instance=self.fake_instance_obj, new_pass='pw',
version='4.0')
def test_set_host_enabled(self):
self._test_compute_api('set_host_enabled', 'call',
enabled='enabled', host='host')
def test_get_host_uptime(self):
self._test_compute_api('get_host_uptime', 'call', host='host')
def test_backup_instance(self):
self._test_compute_api('backup_instance', 'cast',
instance=self.fake_instance_obj, image_id='id',
backup_type='type', rotation='rotation')
def test_snapshot_instance(self):
self._test_compute_api('snapshot_instance', 'cast',
instance=self.fake_instance_obj, image_id='id')
def test_start_instance(self):
self._test_compute_api('start_instance', 'cast',
instance=self.fake_instance_obj)
def test_stop_instance_cast(self):
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance_obj,
clean_shutdown=True, version='4.0')
def test_stop_instance_call(self):
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance_obj,
clean_shutdown=True, version='4.0')
def test_suspend_instance(self):
self._test_compute_api('suspend_instance', 'cast',
instance=self.fake_instance_obj)
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance_obj, bdms=[],
reservations=['uuid1', 'uuid2'], version='4.0')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
instance=self.fake_instance_obj)
def test_unrescue_instance(self):
self._test_compute_api('unrescue_instance', 'cast',
instance=self.fake_instance_obj, version='4.0')
def test_shelve_instance(self):
self._test_compute_api('shelve_instance', 'cast',
instance=self.fake_instance_obj, image_id='image_id',
clean_shutdown=True, version='4.0')
def test_shelve_offload_instance(self):
self._test_compute_api('shelve_offload_instance', 'cast',
instance=self.fake_instance_obj,
clean_shutdown=True, version='4.0')
def test_unshelve_instance(self):
self._test_compute_api('unshelve_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
filter_properties={'fakeprop': 'fakeval'}, node='node',
version='4.0')
def test_volume_snapshot_create(self):
self._test_compute_api('volume_snapshot_create', 'cast',
instance=self.fake_instance_obj, volume_id='fake_id',
create_info={}, version='4.0')
def test_volume_snapshot_delete(self):
self._test_compute_api('volume_snapshot_delete', 'cast',
instance=self.fake_instance_obj, volume_id='fake_id',
snapshot_id='fake_id2', delete_info={}, version='4.0')
def test_external_instance_event(self):
self._test_compute_api('external_instance_event', 'cast',
instances=[self.fake_instance_obj],
events=['event'],
version='4.0')
def test_build_and_run_instance(self):
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance_obj, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=['network1'], security_groups=None,
block_device_mapping=None, node='node', limits=[],
version='4.0')
def test_quiesce_instance(self):
self._test_compute_api('quiesce_instance', 'call',
instance=self.fake_instance_obj, version='4.0')
def test_unquiesce_instance(self):
self._test_compute_api('unquiesce_instance', 'cast',
instance=self.fake_instance_obj, mapping=None, version='4.0')
|
apache-2.0
|
oliverhr/odoo
|
addons/account_bank_statement_extensions/wizard/cancel_statement_line.py
|
381
|
1484
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class cancel_statement_line(osv.osv_memory):
_name = 'cancel.statement.line'
_description = 'Cancel selected statement lines'
def cancel_lines(self, cr, uid, ids, context):
line_ids = context['active_ids']
line_obj = self.pool.get('account.bank.statement.line')
line_obj.write(cr, uid, line_ids, {'state': 'draft'}, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
hrautila/go.opt
|
tests/py/testsocp.py
|
1
|
1173
|
#
# This is copied from CVXOPT examples and modified to be used as test reference
# for corresponding Go program.
#
from cvxopt import matrix, solvers
import helpers
def testsocp(opts):
c = matrix([-2., 1., 5.])
G = [matrix( [[12., 13., 12.],
[ 6., -3., -12.],
[-5., -5., 6.]] ) ]
G += [matrix( [[ 3., 3., -1., 1.],
[-6., -6., -9., 19.],
[10., -2., -2., -3.]] ) ]
h = [ matrix( [-12., -3., -2.] ),
matrix( [27., 0., 3., -42.] ) ]
solvers.options.update(opts)
sol = solvers.socp(c, Gq = G, hq = h)
print "x = \n", helpers.str2(sol['x'], "%.9f")
print "zq[0] = \n", helpers.str2(sol['zq'][0], "%.9f")
print "zq[1] = \n", helpers.str2(sol['zq'][1], "%.9f")
print "\n *** running GO test ***"
helpers.run_go_test("../testsocp", {'x': sol['x'],
'sq0': sol['sq'][0],
'sq1': sol['sq'][1],
'zq0': sol['zq'][0],
'zq1': sol['zq'][1]})
testsocp({'maxiters': 10})
|
lgpl-3.0
|
xanderdunn/ycmd
|
cpp/ycm/tests/gmock/gtest/test/gtest_color_test.py
|
3259
|
4911
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
gpl-3.0
|
niklasf/syzygy-tables.info
|
syzygy_tables_info/model.py
|
1
|
3448
|
# This file is part of the syzygy-tables.info tablebase probing website.
# Copyright (C) 2015-2020 Niklas Fiekas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import dataclasses
import typing
from typing import List, Optional
try:
from typing import Literal
ColorName = Literal["white", "black"]
except ImportError:
ColorName = str # type: ignore
DEFAULT_FEN = "4k3/8/8/8/8/8/8/4K3 w - - 0 1"
EMPTY_FEN = "8/8/8/8/8/8/8/8 w - - 0 1"
try:
class RenderMove(typing.TypedDict):
san: str
uci: str
fen: str
dtm: Optional[int]
badge: str
dtz: int
zeroing: bool
capture: bool
checkmate: bool
insufficient_material: bool
stalemate: bool
except AttributeError:
RenderMove = dict # type: ignore
try:
class RenderDep(typing.TypedDict):
material: str
longest_fen: str
except AttributeError:
RenderDep = dict # type: ignore
try:
class RenderStatsLongest(typing.TypedDict):
label: str
fen: str
except AttributeError:
RenderStatsLongest = dict # type: ignore
try:
class RenderStatsHist(typing.TypedDict, total=False):
empty: int
ply: int
num: int
width: int
active: bool
except AttributeError:
RenderStatsHist = dict # type: ignore
try:
class RenderStats(typing.TypedDict, total=False):
material_side: str
material_other: str
white: int
cursed: int
draws: int
blessed: int
black: int
white_pct: float
cursed_pct: float
draws_pct: float
blessed_pct: float
black_pct: float
longest: List[RenderStatsLongest]
verb: str # winning, losing
histogram: List[RenderStatsHist]
except AttributeError:
RenderStats = dict # type: ignore
try:
class Render(typing.TypedDict, total=False):
material: str
normalized_material: str
thumbnail_url: str
turn: ColorName
fen: str
white_fen: str
black_fen: str
clear_fen: str
swapped_fen: str
horizontal_fen: str
vertical_fen: str
fen_input: str
status: str
winning_side: Optional[ColorName]
frustrated: bool
winning_moves: List[RenderMove]
cursed_moves: List[RenderMove]
drawing_moves: List[RenderMove]
unknown_moves: List[RenderMove]
blessed_moves: List[RenderMove]
losing_moves: List[RenderMove]
illegal: bool
insufficient_material: bool
blessed_loss: bool
cursed_win: bool
is_table: bool
deps: List[RenderDep]
stats: Optional[RenderStats]
except AttributeError:
Render = dict # type: ignore
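# --- Hedged usage sketch (editor addition, not part of the original module) ---
# At runtime the TypedDict declarations above behave like plain dicts (and fall
# back to ``dict`` on interpreters without typing.TypedDict), so template data
# can be assembled with keyword arguments. The concrete values are illustrative.
def _example_render() -> "Render":
    return Render(
        material="KQvK",
        fen=DEFAULT_FEN,
        turn="white",
        winning_side="white",
        frustrated=False,
        winning_moves=[],
        stats=None,
    )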
|
agpl-3.0
|
zishell/shadowsocks
|
tests/test_udp_src.py
|
1009
|
2482
|
#!/usr/bin/python
import socket
import socks
SERVER_IP = '127.0.0.1'
SERVER_PORT = 1081
if __name__ == '__main__':
# Test 1: same source port IPv4
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('127.0.0.1', 9001))
sock_in2.bind(('127.0.0.1', 9002))
sock_out.sendto(b'data', ('127.0.0.1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('127.0.0.1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 2: same source port IPv6
# try again from the same port but IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_in2.bind(('::1', 9002))
sock_out.sendto(b'data', ('::1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('::1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 3: different source ports IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9003))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_out.sendto(b'data', ('::1', 9001))
result3 = sock_in1.recvfrom(8)
# make sure they're from different source ports
assert result1 != result3
sock_out.close()
sock_in1.close()
|
apache-2.0
|
tobley/wns
|
travis_pypi_setup.py
|
1
|
3730
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'tobley/wns'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
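# --- Hedged usage note (editor addition, not part of the original script) ---
# Typical invocation, based on the argparse options defined above (the repo
# slug is an example value):
#
#   python travis_pypi_setup.py --repo example-user/example-repo
#
# The script fetches the repository's Travis public key, encrypts the PyPI
# password (prompting for it if --password is omitted) and rewrites .travis.yml.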
|
isc
|
undertherain/vsmlib
|
vsmlib/corpus/corpus.py
|
1
|
2196
|
import numpy as np
import fnmatch
import os
import re
from vsmlib.misc.data import detect_archive_format_and_open
import logging
logger = logging.getLogger(__name__)
_default_tokenizer_patter = r"[\w\-']+|[.,!?…]"
class LineTokenizer:
def __init__(self, re_pattern=_default_tokenizer_patter):
self.re_token = re.compile(re_pattern)
def __call__(self, s):
tokens = self.re_token.findall(s)
return tokens
class FileTokenIterator:
def __init__(self, path, re_pattern=_default_tokenizer_patter):
self.path = path
self.tokenizer = LineTokenizer(re_pattern)
def __iter__(self):
return self.next()
def next(self):
with detect_archive_format_and_open(self.path) as f:
for line in f:
s = line.strip().lower()
# todo lower should be parameter
tokens = self.tokenizer(s)
for token in tokens:
yield token
class DirTokenIterator:
def __init__(self, path, re_pattern=_default_tokenizer_patter):
self.path = path
self.__gen__ = self.gen(re_pattern)
def __iter__(self):
return self
def __next__(self):
return next(self.__gen__)
def gen(self, re_pattern):
for root, dir, files in os.walk(self.path, followlinks=True):
for items in fnmatch.filter(files, "*"):
logger.info("processing " + os.path.join(root, items))
for token in FileTokenIterator(os.path.join(root, items), re_pattern=re_pattern):
yield(token)
def load_file_as_ids(path, vocabulary, gzipped=None, downcase=True, re_pattern=r"[\w\-']+|[.,!?…]"):
# use proper tokenizer from cooc
    # options to ignore sentence boundaries
# specify what to do with missing words
# replace numbers with special tokens
result = []
ti = FileTokenIterator(path, re_pattern=re_pattern)
for token in ti:
w = token # specify what to do with missing words
if downcase:
w = w.lower()
result.append(vocabulary.get_id(w))
return np.array(result, dtype=np.int32)
def main():
print("test")
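# --- Hedged usage sketch (editor addition, not part of the original module) ---
# LineTokenizer is self-contained and can be exercised directly; the sample
# sentence is illustrative. FileTokenIterator/DirTokenIterator follow the same
# pattern but stream tokens from files on disk (the path below is hypothetical).
if __name__ == "__main__":
    _tok = LineTokenizer()
    print(_tok("Don't panic, it's only a test!"))
    # for token in DirTokenIterator("/path/to/corpus"):
    #     print(token)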
|
apache-2.0
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/mando/tests/test_utils.py
|
3
|
3578
|
import unittest
from paramunittest import parametrized
from mando.utils import action_by_type, ensure_dashes, find_param_docs, split_doc
@parametrized(
dict(obj=True, result={'action': 'store_false'}),
dict(obj=False, result={'action': 'store_true'}),
dict(obj=[], result={'action': 'append'}),
dict(obj=[1, False], result={'action': 'append'}),
dict(obj=None, result={}),
dict(obj=1, result={'type': type(1)}),
dict(obj=1.1, result={'type': type(1.1)}),
dict(obj='1', result={'type': type('1')}),
)
class TestActionByType(unittest.TestCase):
def setParameters(self, obj, result):
self.obj = obj
self.result = result
def testFunc(self):
self.assertEqual(self.result, action_by_type(self.obj))
@parametrized(
(['m'], ['-m']),
(['m', 'min'], ['-m', '--min']),
(['-m'], ['-m']),
(['-m', 'min'], ['-m', '--min']),
(['m', '--min'], ['-m', '--min']),
(['-m', '--min'], ['-m', '--min']),
(['-m', '--min', 'l', 'less'], ['-m', '--min', '-l', '--less']),
)
class TestEnsureDashes(unittest.TestCase):
def setParameters(self, opts, result):
self.opts = opts
self.result = result
def testFunc(self):
self.assertEqual(self.result, list(ensure_dashes(self.opts)))
@parametrized(
('', ['', '']),
('only help.', ['only help.', 'only help.']),
('help.\nstill help.', ['help.\nstill help.', 'help.\nstill help.']),
('help\n\ndesc', ['help', 'desc']),
('help\n\n\ndesc\n', ['help', 'desc']),
)
class TestSplitDoc(unittest.TestCase):
def setParameters(self, doc, parts):
self.doc = doc
self.parts = parts
def testFunc(self):
self.assertEqual(self.parts, split_doc(self.doc))
a_1 = {'a_param': (['a-param'], {'help': 'Short story.'})}
a_1_1 = {'a_param': (['a_param'], {'help': 'Short story.'})}
a_2 = {'j': (['-j'], {'help': 'Woow'})}
a_3 = {'noun': (['-n', '--noun'], {'help': 'cat'})}
a_all = {}
for a in (a_1, a_2, a_3):
a_all.update(a)
@parametrized(
dict(doc='', params={}),
dict(doc='Brevity is the soul of wit.', params={}),
dict(doc=':param a-param: Short story.', params=a_1),
dict(doc=':param a_param: Short story.', params=a_1_1),
dict(doc=':param -j: Woow', params=a_2),
dict(doc=':param -n, --noun: cat', params=a_3),
dict(doc='''
Some short text here and there.
:param well: water''', params={'well': (['well'], {'help': 'water'})}),
dict(doc='''
:param a-param: Short story.
:param -j: Woow
:param -n, --noun: cat''', params=a_all),
dict(doc='''
Lemme see.
:param long-story: A long storey belive me: when all started, Adam and Bob were just two little farmers.
''', params={'long_story': (['long-story'], {'help': 'A long storey '\
'belive me: when all started, Adam and '\
'Bob were just two little farmers.'})}),
)
class TestFindParamDocs(unittest.TestCase):
def setParameters(self, doc, params):
self.doc = doc
self.params = params
def testFunc(self):
found_params = find_param_docs(self.doc)
self.assertTrue(self.params.keys() == found_params.keys())
for key, value in self.params.items():
self.assertTrue(key in found_params)
found_value = found_params[key]
self.assertTrue(value[0] == found_value[0])
for kwarg, val in value[1].items():
self.assertTrue(val == found_value[1][kwarg])
|
agpl-3.0
|
ukanga/SickRage
|
lib/backports/ssl_match_hostname/__init__.py
|
134
|
3650
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
gpl-3.0
|
Bashar/django
|
django/contrib/gis/db/models/fields.py
|
28
|
12553
|
from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.constants import GIS_LOOKUPS
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.utils import six
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
form_class = forms.GeometryField
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Always include SRID for less fragility; include others if they're
# not the default values.
kwargs['srid'] = self.srid
if self.dim != 2:
kwargs['dim'] = self.dim
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection).lower() in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name):
super(GeometryField, self).contribute_to_class(cls, name)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
}
defaults.update(kwargs)
if (self.dim > 2 and 'widget' not in kwargs and
not getattr(defaults['form_class'].widget, 'supports_3d', False)):
defaults['widget'] = forms.Textarea
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
if lookup_type in connection.ops.gis_terms:
# special case for isnull lookup
if lookup_type == 'isnull':
return []
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if lookup_type in connection.ops.distance_functions:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, SQLEvaluator):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if value is None:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value)
for lookup_name in GIS_LOOKUPS:
lookup = type(lookup_name, (GISLookup,), {'lookup_name': lookup_name})
GeometryField.register_lookup(lookup)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
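# --- Hedged usage sketch (editor addition, not part of the original module) ---
# A typical GeoDjango model using the field classes above; the model, app and
# field names are hypothetical. Application code normally imports these fields
# via django.contrib.gis.db.models (importing it here would be circular).
#
#   from django.contrib.gis.db import models
#
#   class City(models.Model):
#       name = models.CharField(max_length=100)
#       location = models.PointField(srid=4326, spatial_index=True)
#       boundary = models.MultiPolygonField(null=True, blank=True)
#
#       objects = models.GeoManager()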
|
bsd-3-clause
|
stjj89/iop
|
idapython/reentrant_int_scanner.py
|
1
|
4386
|
# Copyright (c) <2013-2014>, <Samuel J. Tan ([email protected])>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ====================================
# reentrant_int_scanner.py
# Reentrant interrupt scanner
# IDAPython script
# By Samuel Tan <[email protected]>
# ====================================
#
# Lists instructions within an interrupt handler that
# potentially re-enable the interrupt itself
# (i.e. by writing to the register containing the
# interrupt enable bit and/or the status register),
# thereby making the handler reentrant.
#
# The user provides the address of the first instruction
# in the interrupt handler, and how many instructions
# to search ahead from that instruction.
#
#
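# Example (hypothetical) session: after loading e.g. an MSP430 firmware image
# in IDA, running this script and entering the handler start address, the
# address of the peripheral interrupt-enable register and a search depth
# prints one line per reachable instruction that touches that register or SR,
# in the form "0x<addr>\t<disassembly>".
#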
from idaapi import *
# Switch to control debug print statements
DEBUG = True
# Returns true iff the ie_reg_addr or the SR is used as an
# instruction operand
def potential_ie_set_instr(ea, ie_reg_addr):
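    # In IDAPython, GetOpType() returns 1 (o_reg) for register operands and
    # 2 (o_mem) for direct memory references; the checks below look for the
    # IE register address as a memory operand or SR as a register operand.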
op_type_1 = GetOpType(ea, 0)
op_type_2 = GetOpType(ea, 1)
op_val_1 = GetOperandValue(ea, 0)
op_val_2 = GetOperandValue(ea, 1)
op_text_1 = GetOpnd(ea, 0)
op_text_2 = GetOpnd(ea, 1)
# ie_reg_addr used as a memory reference operand
if (op_type_1 == 2) and (op_val_1 == ie_reg_addr):
return True
if (op_type_2 == 2) and (op_val_2 == ie_reg_addr):
return True
# SR is an operand
if (op_type_1 == 1) and (op_text_1 == 'SR'):
return True
if (op_type_2 == 1) and (op_text_2 == 'SR'):
return True
return False
# Search all possible execution paths from the
# given head for potential interrupt enable instructions,
# printing them as they are encountered
# Terminates at a maximum depth to prevent infinitely
# searching through spin loops
def find_ie_instr(head, seg_ea, ie_reg_addr, max_depth):
# Terminate search at max depth
if (max_depth == 0):
return
curr_ea = NextHead(head, seg_ea)
instr_name = GetMnem(curr_ea)
if (not isCode(GetFlags(curr_ea))) or (instr_name == "reti"):
if (instr_name == "reti"):
if DEBUG:
print "#DEBUG RETI reached at %04x" %(curr_ea)
return
if (potential_ie_set_instr(curr_ea, ie_reg_addr)):
print "0x%04x\t%s" %(curr_ea, GetDisasm(curr_ea))
# Call recursively on all possible branches
for ref in CodeRefsFrom(curr_ea, 1):
find_ie_instr(ref, seg_ea, ie_reg_addr, max_depth-1)
def find_all_ie_instr():
ea = AskAddr(ScreenEA(), 'Please enter interrupt handler start address')
ie_reg_addr = AskAddr(0x0, 'Please enter address of peripheral register'
'containing interrupt enable bit')
max_depth = AskLong(100, 'Please enter a max search depth')
print 'Instructions re-enabling interrupts within handler'
print '==================================================='
find_ie_instr(ea, SegEnd(ea), ie_reg_addr, max_depth)
print ''
# Run the scanner when the script is executed
find_all_ie_instr()
|
bsd-3-clause
|
tensorflow/models
|
research/lstm_object_detection/metrics/coco_evaluation_all_frames.py
|
2
|
5494
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating video object detections with COCO metrics."""
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_evaluation
from object_detection.metrics import coco_tools
class CocoEvaluationAllFrames(coco_evaluation.CocoDetectionEvaluator):
"""Class to evaluate COCO detection metrics for frame sequences.
The class overrides two functions: add_single_ground_truth_image_info and
add_single_detected_image_info.
  For sequence video detection evaluation, the methods iterate through the
  entire groundtruth_dict so that every unrolled frame in one LSTM training
  sample is considered. Therefore, both groundtruth and detection results of
  all frames are added to the evaluation. This is used when all the frames
  are labeled in the video object detection training job.
"""
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Add groundtruth results of all frames to the eval pipeline.
This method overrides the function defined in the base class.
Args:
image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A list of dictionaries, each containing -
InputDataFields.groundtruth_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_boxes] containing 1-indexed groundtruth classes for the boxes.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
"""
for idx, gt in enumerate(groundtruth_dict):
if not gt:
continue
image_frame_id = '{}_{}'.format(image_id, idx)
if image_frame_id in self._image_ids:
tf.logging.warning(
'Ignoring ground truth with image id %s since it was '
'previously added', image_frame_id)
continue
self._groundtruth_list.extend(
coco_tools.ExportSingleImageGroundtruthToCoco(
image_id=image_frame_id,
next_annotation_id=self._annotation_id,
category_id_set=self._category_id_set,
groundtruth_boxes=gt[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes=gt[
standard_fields.InputDataFields.groundtruth_classes]))
self._annotation_id += (
gt[standard_fields.InputDataFields.groundtruth_boxes].shape[0])
# Boolean to indicate whether a detection has been added for this image.
self._image_ids[image_frame_id] = False
def add_single_detected_image_info(self, image_id, detections_dict):
"""Add detection results of all frames to the eval pipeline.
This method overrides the function defined in the base class.
Args:
image_id: A unique string/integer identifier for the image.
      detections_dict: A list of dictionaries, each containing -
DetectionResultFields.detection_boxes: float32 numpy array of shape
[num_boxes, 4] containing `num_boxes` detection boxes of the format
[ymin, xmin, ymax, xmax] in absolute image coordinates.
DetectionResultFields.detection_scores: float32 numpy array of shape
[num_boxes] containing detection scores for the boxes.
DetectionResultFields.detection_classes: integer numpy array of shape
[num_boxes] containing 1-indexed detection classes for the boxes.
Raises:
ValueError: If groundtruth for the image_id is not available.
"""
for idx, det in enumerate(detections_dict):
if not det:
continue
image_frame_id = '{}_{}'.format(image_id, idx)
if image_frame_id not in self._image_ids:
raise ValueError(
'Missing groundtruth for image-frame id: {}'.format(image_frame_id))
if self._image_ids[image_frame_id]:
tf.logging.warning(
'Ignoring detection with image id %s since it was '
'previously added', image_frame_id)
continue
self._detection_boxes_list.extend(
coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id=image_frame_id,
category_id_set=self._category_id_set,
detection_boxes=det[
standard_fields.DetectionResultFields.detection_boxes],
detection_scores=det[
standard_fields.DetectionResultFields.detection_scores],
detection_classes=det[
standard_fields.DetectionResultFields.detection_classes]))
self._image_ids[image_frame_id] = True
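# A minimal usage sketch (hypothetical ids and numpy arrays; the category list
# and per-frame dict keys follow the docstrings above):
#
#   import numpy as np
#   evaluator = CocoEvaluationAllFrames(categories=[{'id': 1, 'name': 'person'}])
#   frame_gt = {
#       standard_fields.InputDataFields.groundtruth_boxes:
#           np.array([[0., 0., 10., 10.]], dtype=np.float32),
#       standard_fields.InputDataFields.groundtruth_classes:
#           np.array([1], dtype=np.int32),
#   }
#   frame_det = {
#       standard_fields.DetectionResultFields.detection_boxes:
#           np.array([[0., 0., 10., 10.]], dtype=np.float32),
#       standard_fields.DetectionResultFields.detection_scores:
#           np.array([0.9], dtype=np.float32),
#       standard_fields.DetectionResultFields.detection_classes:
#           np.array([1], dtype=np.int32),
#   }
#   evaluator.add_single_ground_truth_image_info('video0_clip0', [frame_gt] * 2)
#   evaluator.add_single_detected_image_info('video0_clip0', [frame_det] * 2)
#   metrics = evaluator.evaluate()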
|
apache-2.0
|
nexdatas/tools
|
test/NXSCreatePyEvalH5Cpp_test.py
|
1
|
85203
|
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2018 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file NXSCreatePyEvalH5Cpp_test.py
# unittests for pyeval scripts with the h5cpp writer
#
import unittest
import os
import sys
import random
import struct
import binascii
import shutil
import socket
import pickle
# import time
# import threading
import PyTango
# import json
# import nxstools
# from nxstools import nxscreate
# from nxstools import nxsdevicetools
import nxstools.h5cppwriter as H5CppWriter
try:
import TestServerSetUp
except ImportError:
from . import TestServerSetUp
if sys.version_info > (3,):
unicode = str
long = int
# if 64-bit machine
IS64BIT = (struct.calcsize("P") == 8)
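# Minimal stand-ins for the NeXus writer root object that the pyeval helpers
# receive via commonblock["__root__"]; only the attributes the tests read
# (filename, and for TstRoot2 also stepsperfile/currentfileid) are defined.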
class TstRoot(object):
filename = ""
class TstRoot2(object):
filename = ""
stepsperfile = 0
currentfileid = 0
# test fixture
class NXSCreatePyEvalH5CppTest(unittest.TestCase):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
try:
# random seed
self.seed = long(binascii.hexlify(os.urandom(16)), 16)
except NotImplementedError:
import time
# random seed
self.seed = long(time.time() * 256) # use fractional seconds
self._rnd = random.Random(self.seed)
self._bint = "int64" if IS64BIT else "int32"
self._buint = "uint64" if IS64BIT else "uint32"
self._bfloat = "float64" if IS64BIT else "float32"
self.__args = '{"host":"localhost", "db":"nxsconfig", ' \
'"read_default_file":"/etc/my.cnf", "use_unicode":true}'
# home = expanduser("~")
db = PyTango.Database()
self.host = db.get_db_host().split(".")[0]
self.port = db.get_db_port()
self.directory = "."
self.flags = "-d . "
# self.flags = " -d -r testp09/testmcs/testr228 "
self.device = 'testp09/testmcs/testr228'
self.fwriter = H5CppWriter
self.maxDiff = None
# test starter
# \brief Common set up
def setUp(self):
print("\nsetting up...")
print("SEED = %s" % self.seed)
# test closer
# \brief Common tear down
def tearDown(self):
print("tearing down ...")
# Exception tester
# \param exception expected exception
# \param method called method
# \param args list with method arguments
# \param kwargs dictionary with method arguments
def myAssertRaise(self, exception, method, *args, **kwargs):
try:
error = False
method(*args, **kwargs)
except Exception:
error = True
self.assertEqual(error, True)
# float list tester
def myAssertFloatList(self, list1, list2, error=0.0):
self.assertEqual(len(list1), len(list2))
for i, el in enumerate(list1):
if abs(el - list2[i]) >= error:
print("EL %s %s %s" % (el, list2[i], error))
self.assertTrue(abs(el - list2[i]) < error)
# float image tester
def myAssertImage(self, image1, image2, error=None):
self.assertEqual(len(image1), len(image2))
for i in range(len(image1)):
self.assertEqual(len(image1[i]), len(image2[i]))
for j in range(len(image1[i])):
if error is not None:
if abs(image1[i][j] - image2[i][j]) >= error:
print("EL %s %s %s" % (
image1[i][j], image2[i][j], error))
self.assertTrue(abs(image1[i][j] - image2[i][j]) < error)
else:
self.assertEqual(image1[i][j], image2[i][j])
    # float vector tester
def myAssertVector(self, image1, image2, error=None):
self.assertEqual(len(image1), len(image2))
for i in range(len(image1)):
self.assertEqual(len(image1[i]), len(image2[i]))
for j in range(len(image1[i])):
self.assertEqual(len(image1[i][j]), len(image2[i][j]))
for k in range(len(image1[i][j])):
if error is not None:
if abs(image1[i][j][k] - image2[i][j][k]) >= error:
print("EL %s %s %s" % (
image1[i][j][k], image2[i][j][k], error))
self.assertTrue(
abs(image1[i][j][k] - image2[i][j][k]) < error)
else:
self.assertEqual(image1[i][j][k], image2[i][j][k])
def test_lambdavds_savefilename_cb(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import lambdavds
commonblock = {}
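        # commonblock is the per-scan dictionary shared between pyeval
        # datasources; savefilename_cb appends every save-file name under the
        # given key so later callbacks can assemble the VDS layout.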
sfn1 = "myfile1"
sfn2 = "myfile2"
fn1 = lambdavds.savefilename_cb(
commonblock, sfn1, "lmbd_savefilename")
self.assertEqual(fn1, sfn1)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd_savefilename" in commonblock)
self.assertEqual(len(commonblock["lmbd_savefilename"]), 1)
self.assertEqual(commonblock["lmbd_savefilename"][0], sfn1)
fn2 = lambdavds.savefilename_cb(
commonblock, sfn2, "lmbd_savefilename")
self.assertEqual(fn2, sfn2)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd_savefilename" in commonblock)
self.assertEqual(len(commonblock["lmbd_savefilename"]), 2)
self.assertEqual(commonblock["lmbd_savefilename"][1], sfn2)
def test_lambdavds_framenumbers_cb(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import lambdavds
commonblock = {}
sfn1 = "34"
sfn2 = 3
rfn1 = 34
rfn2 = 3
fn1 = lambdavds.framenumbers_cb(
commonblock, sfn1, "lmbd_framenumbers")
self.assertEqual(fn1, sfn1)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd_framenumbers" in commonblock)
self.assertEqual(len(commonblock["lmbd_framenumbers"]), 1)
self.assertEqual(commonblock["lmbd_framenumbers"][0], rfn1)
fn2 = lambdavds.framenumbers_cb(
commonblock, sfn2, "lmbd_framenumbers")
self.assertEqual(fn2, sfn2)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd_framenumbers" in commonblock)
self.assertEqual(len(commonblock["lmbd_framenumbers"]), 2)
self.assertEqual(commonblock["lmbd_framenumbers"][1], rfn2)
def test_common_get_element(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import common
self.assertEqual(common.get_element([1, 2, 3, 4, 5], 3), 4)
self.assertEqual(common.get_element([2, 3, 4, 5], 1), 3)
def test_blockitem_int(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import common
commonblock = {}
sfn1 = "34"
sfn2 = 3
rfn1 = 34
rfn2 = 3
fn1 = common.blockitem_addint(
commonblock, "lmbd2_framenumbers", sfn1)
self.assertEqual(fn1, sfn1)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd2_framenumbers" in commonblock)
self.assertEqual(len(commonblock["lmbd2_framenumbers"]), 1)
self.assertEqual(commonblock["lmbd2_framenumbers"][0], rfn1)
fn2 = common.blockitem_addint(
commonblock, "lmbd2_framenumbers", sfn2)
self.assertEqual(fn2, sfn2)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd2_framenumbers" in commonblock)
self.assertEqual(len(commonblock["lmbd2_framenumbers"]), 2)
self.assertEqual(commonblock["lmbd2_framenumbers"][1], rfn2)
fn2 = common.blockitems_rm(
commonblock, ["lmbd2_framenumbers"])
self.assertEqual(len(commonblock), 0)
def test_blockitem(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import common
commonblock = {}
sfn1 = "myfile1"
sfn2 = "myfile2"
fn1 = common.blockitem_add(
commonblock, "lmbd_filename", sfn1)
self.assertEqual(fn1, sfn1)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd_filename" in commonblock)
self.assertEqual(len(commonblock["lmbd_filename"]), 1)
self.assertEqual(commonblock["lmbd_filename"][0], sfn1)
fn2 = common.blockitem_add(
commonblock, "lmbd_filename", sfn2)
self.assertEqual(fn2, sfn2)
self.assertEqual(len(commonblock), 1)
self.assertTrue("lmbd_filename" in commonblock)
self.assertEqual(len(commonblock["lmbd_filename"]), 2)
self.assertEqual(commonblock["lmbd_filename"][1], sfn2)
fn2 = common.blockitems_rm(
commonblock, ["lmbd_filename"])
self.assertEqual(len(commonblock), 0)
def test_common_filestartnum_cb(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import common
commonblock = {}
sfn1 = 3
nbn1 = 2
fn1 = common.filestartnum_cb(
commonblock, sfn1, nbn1, "andor_filestartnum")
self.assertEqual(fn1, sfn1 - nbn1)
self.assertEqual(len(commonblock), 1)
self.assertTrue("andor_filestartnum" in commonblock)
self.assertEqual(
commonblock["andor_filestartnum"], sfn1 - nbn1 + 1)
def test_lambdavds_triggermode_cb_nosave(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
commonblock = {}
name = "lmbd"
triggermode = 0
saveallimages = False
framesperfile = 10
height = 2321
width = 32
opmode = 6
filepostfix = "nxs"
from nxstools.pyeval import lambdavds
result = lambdavds.triggermode_cb(
commonblock,
name,
triggermode,
saveallimages,
framesperfile,
height,
width,
opmode,
filepostfix,
"lmbd_savefilename",
"lmbd_framenumbers",
"myfile_24234.nxs",
"entry1234")
self.assertEqual(triggermode, result)
def test_beamtimeid_nodir(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
tstroot = TstRoot()
commonblock = {"__nxroot__": tstroot}
tstroot.filename = "/mypath"
start_time = "14:13:12"
shortname = "P00"
commissiondir = "/testgpfs/commission"
currentdir = "/testgpfs/current"
localdir = "/testgpfs/local"
currentprefix = "/testgpfs"
currentpostfix = "current"
commissionprefix = "/testgpfs"
commissionpostfix = "commission"
sgh = socket.gethostname()
btid = "%s_%s@%s" % (shortname, start_time, sgh)
from nxstools.pyeval import beamtimeid
result = beamtimeid.beamtimeid(
commonblock, start_time, shortname,
commissiondir, currentdir, localdir,
currentprefix, currentpostfix,
commissionprefix, commissionpostfix)
self.assertEqual(btid, result)
def test_beamtimeid_current(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
cwd = os.getcwd()
tstroot = TstRoot()
commonblock = {"__nxroot__": tstroot}
tstroot.filename = "%s/testcurrent/myfile.nxs" % cwd
start_time = "14:13:12"
shortname = "P00"
currentdir = "%s" % cwd
currentprefix = "beamtime-metadata-"
currentpostfix = ".json"
commissiondir = "/testgpfs/commission"
commissionprefix = "beam-metadata-"
commissionpostfix = ".jsn"
localdir = "/testgpfs/local"
beamtime = "2342342"
bfn = "%s/%s%s%s" % (cwd, currentprefix, beamtime, currentpostfix)
try:
open(bfn, 'a').close()
from nxstools.pyeval import beamtimeid
result = beamtimeid.beamtimeid(
commonblock, start_time, shortname,
commissiondir, currentdir, localdir,
currentprefix, currentpostfix,
commissionprefix, commissionpostfix)
self.assertEqual(beamtime, result)
finally:
os.remove(bfn)
def test_beamtimeid_commission(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
cwd = os.getcwd()
tstroot = TstRoot()
commonblock = {"__nxroot__": tstroot}
tstroot.filename = "%s/testcurrent/myfile.nxs" % cwd
start_time = "14:13:12"
shortname = "P00"
currentdir = "/testgpfs/current"
currentprefix = "bmtime-metadata-"
currentpostfix = ".jsn"
commissiondir = "%s" % cwd
commissionprefix = "beamtime-metadata-"
commissionpostfix = ".json"
localdir = "/testgpfs/local"
beamtime = "2342342"
bfn = "%s/%s%s%s" % (
cwd, commissionprefix, beamtime, commissionpostfix)
try:
open(bfn, 'a').close()
from nxstools.pyeval import beamtimeid
result = beamtimeid.beamtimeid(
commonblock, start_time, shortname,
commissiondir, currentdir, localdir,
currentprefix, currentpostfix,
commissionprefix, commissionpostfix)
self.assertEqual(beamtime, result)
finally:
os.remove(bfn)
def test_beamtimeid_local(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
cwd = os.getcwd()
tstroot = TstRoot()
commonblock = {"__nxroot__": tstroot}
tstroot.filename = "%s/testcurrent/myfile.nxs" % cwd
start_time = "14:13:12"
shortname = "P00"
currentdir = "/testgpfs/current"
currentprefix = "bmtime-metadata-"
currentpostfix = ".jsn"
commissiondir = "/testgpfs/"
commissionprefix = "beamtime-metadata-"
commissionpostfix = ".json"
localdir = "%s" % cwd
beamtime = "2342342"
bfn = "%s/%s%s%s" % (
cwd, commissionprefix, beamtime, commissionpostfix)
try:
open(bfn, 'a').close()
from nxstools.pyeval import beamtimeid
result = beamtimeid.beamtimeid(
commonblock, start_time, shortname,
commissiondir, currentdir, localdir,
currentprefix, currentpostfix,
commissionprefix, commissionpostfix)
self.assertEqual(beamtime, result)
finally:
os.remove(bfn)
def test_lambdavds_triggermode_cb_onefile(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "lmbd"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = '%s_00000.nxs' % (fileprefix)
sfname1 = '%s_00000' % (fileprefix)
ffname1 = '%s/%s' % (path, fname1)
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
fl1 = self.fwriter.create_file(ffname1, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint32", [30, 10, 20], [1, 10, 20])
intimage[...] = vl
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"lmbd_savefilename": [sfname1],
"lmbd_framenumbers": [30],
"__root__": rt,
}
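            # triggermode_cb is expected to build a virtual "data" field under
            # the detector group mapping the frames stored in the external
            # lambda file(s) listed in commonblock; the read-back below
            # verifies the mapping image by image.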
triggermode = 0
saveallimages = True
framesperfile = 0
height = 10
width = 20
opmode = 24
filepostfix = "nxs"
from nxstools.pyeval import lambdavds
result = lambdavds.triggermode_cb(
commonblock,
name,
triggermode,
saveallimages,
framesperfile,
height,
width,
opmode,
filepostfix,
"lmbd_savefilename",
"lmbd_framenumbers",
filename,
entryname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_lambdavds_triggermode_cb_singleframe(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "lmbd"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_%05d.nxs' % (fileprefix, i) for i in range(30)]
sfname1 = ['%s_%05d' % (fileprefix, i) for i in range(30)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint32", [1, 10, 20], [1, 10, 20])
vv = [[[vl[i][jj][ii] for ii in range(20)]
for jj in range(10)]]
intimage[0, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"lmbd_savefilename": sfname1,
"lmbd_framenumbers": [1] * 30,
"__root__": rt,
}
triggermode = 0
saveallimages = True
framesperfile = 0
height = 10
width = 20
opmode = 24
filepostfix = "nxs"
from nxstools.pyeval import lambdavds
result = lambdavds.triggermode_cb(
commonblock,
name,
triggermode,
saveallimages,
framesperfile,
height,
width,
opmode,
filepostfix,
"lmbd_savefilename",
"lmbd_framenumbers",
filename,
entryname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_lambdavds_triggermode_cb_splitmode(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "lmbd"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_00000_part%05d.nxs' % (fileprefix, i) for i in range(3)]
sfname1 = ['%s_00000_part%05d' % (fileprefix, i) for i in range(3)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [14, 14, 2]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint32",
[framenumbers[i], 10, 20], [1, 10, 20])
vv = [[[vl[i * framenumbers[0] + nn][jj][ii]
for ii in range(20)]
for jj in range(10)]
for nn in range(framenumbers[i])]
intimage[:, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"lmbd_savefilename": sfname1,
"lmbd_framenumbers": framenumbers,
"__root__": rt,
}
triggermode = 0
saveallimages = True
framesperfile = 14
height = 10
width = 20
opmode = 24
filepostfix = "nxs"
from nxstools.pyeval import lambdavds
result = lambdavds.triggermode_cb(
commonblock,
name,
triggermode,
saveallimages,
framesperfile,
height,
width,
opmode,
filepostfix,
"lmbd_savefilename",
"lmbd_framenumbers",
filename,
entryname)
self.assertEqual(triggermode, result)
images = det.open("data")
fl.flush()
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_signalname_detector(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
self._fname = "%s_%s.nxs" % (mfileprefix, scanid)
try:
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
fl.writer = self.fwriter
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
dt = entry.create_group("data", "NXdata")
dt.create_field(
"pilatus", "uint32", [30, 30, 20], [1, 30, 20]).close()
dt.create_field(
"lambda", "uint32", [30, 30, 10], [1, 30, 10]).close()
dt.create_field("exp_c01", "uint32", [30], [1]).close()
dt.create_field("exp_t01", "uint32", [30], [1]).close()
dt.create_field("exp_c02", "uint32", [30], [1]).close()
signalname = "lambda"
commonblock = {"__root__": rt}
detector = "lambda"
firstchannel = "exp_c01"
timers = "exp_t01 exp_t02"
mgchannels = "pilatus exp_c01 exp_c02 ext_t01"
entryname = "entry123"
from nxstools.pyeval import datasignal
result = datasignal.signalname(
commonblock,
detector,
firstchannel,
timers,
mgchannels,
entryname,
True)
self.assertEqual(signalname, result)
self.assertTrue("default" in rt.attributes.names())
endef = rt.attributes["default"][...]
self.assertEqual(endef, entryname)
self.assertTrue("default" in entry.attributes.names())
dtdef = entry.attributes["default"][...]
self.assertEqual(dtdef, "data")
dt.close()
entry.close()
fl.close()
finally:
if os.path.exists(self._fname):
os.remove(self._fname)
def test_signalname_firstchannel(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
self._fname = "%s_%s.nxs" % (mfileprefix, scanid)
try:
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
fl.writer = self.fwriter
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
dt = entry.create_group("data", "NXdata")
dt.create_field(
"pilatus", "uint32", [30, 30, 20], [1, 30, 20]).close()
dt.create_field(
"lambda", "uint32", [30, 30, 10], [1, 30, 10]).close()
dt.create_field("exp_c01", "uint32", [30], [1]).close()
dt.create_field("exp_t01", "uint32", [30], [1]).close()
dt.create_field("exp_c02", "uint32", [30], [1]).close()
signalname = "exp_c01"
commonblock = {"__root__": rt}
detector = "lambda2"
firstchannel = "exp_c01"
timers = "exp_t01 exp_t02"
mgchannels = "pilatus exp_c01 exp_c02 ext_t01"
entryname = "entry123"
from nxstools.pyeval import datasignal
result = datasignal.signalname(
commonblock,
detector,
firstchannel,
timers,
mgchannels,
entryname,
False
)
self.assertEqual(signalname, result)
self.assertTrue("default" not in rt.attributes.names())
self.assertTrue("default" not in entry.attributes.names())
dt.close()
entry.close()
fl.close()
finally:
if os.path.exists(self._fname):
os.remove(self._fname)
def test_signalname_mgchannels(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
self._fname = "%s_%s.nxs" % (mfileprefix, scanid)
try:
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
fl.writer = self.fwriter
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
dt = entry.create_group("data", "NXdata")
dt.create_field(
"pilatus", "uint32", [30, 30, 20], [1, 30, 20]).close()
dt.create_field(
"lambda", "uint32", [30, 30, 10], [1, 30, 10]).close()
dt.create_field("exp_c01", "uint32", [30], [1]).close()
dt.create_field("exp_t01", "uint32", [30], [1]).close()
dt.create_field("exp_c02", "uint32", [30], [1]).close()
signalname = "pilatus"
commonblock = {"__root__": rt}
detector = "lambda2"
firstchannel = "exp_c03"
timers = "exp_t01 exp_t02"
mgchannels = "pilatus exp_c01 exp_c02 ext_t01"
entryname = "entry123"
from nxstools.pyeval import datasignal
result = datasignal.signalname(
commonblock,
detector,
firstchannel,
timers,
mgchannels,
entryname)
self.assertEqual(signalname, result)
dt.close()
entry.close()
fl.close()
finally:
if os.path.exists(self._fname):
os.remove(self._fname)
def test_signalname_alphabetic(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
self._fname = "%s_%s.nxs" % (mfileprefix, scanid)
try:
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
fl.writer = self.fwriter
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
dt = entry.create_group("data", "NXdata")
dt.create_field(
"pilatus", "uint32", [30, 30, 20], [1, 30, 20]).close()
dt.create_field(
"lambda", "uint32", [30, 30, 10], [1, 30, 10]).close()
dt.create_field("exp_c01", "uint32", [30], [1]).close()
dt.create_field("exp_t01", "uint32", [30], [1]).close()
dt.create_field("exp_c02", "uint32", [30], [1]).close()
signalname = "exp_c01"
commonblock = {"__root__": rt}
detector = "lambda2"
firstchannel = "exp_c03"
timers = "exp_t01 exp_t02"
mgchannels = "exp_c03"
entryname = "entry123"
from nxstools.pyeval import datasignal
result = datasignal.signalname(
commonblock,
detector,
firstchannel,
timers,
mgchannels,
entryname)
self.assertEqual(signalname, result)
dt.close()
entry.close()
fl.close()
finally:
if os.path.exists(self._fname):
os.remove(self._fname)
def test_signalname_nofields(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
self._fname = "%s_%s.nxs" % (mfileprefix, scanid)
try:
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
fl.writer = self.fwriter
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
dt = entry.create_group("data", "NXdata")
signalname = ""
commonblock = {"__root__": rt}
detector = "lambda2"
firstchannel = "exp_c03"
timers = "exp_t01 exp_t02"
mgchannels = "exp_c03"
entryname = "entry123"
from nxstools.pyeval import datasignal
result = datasignal.signalname(
commonblock,
detector,
firstchannel,
timers,
mgchannels,
entryname)
self.assertEqual(signalname, result)
dt.close()
entry.close()
fl.close()
finally:
if os.path.exists(self._fname):
os.remove(self._fname)
def test_absorber_thickness(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
position = 6
thicknesslist = "[3.2,23.23,123.4,12345.3]"
thl = [0, 23.23, 123.4, 0]
from nxstools.pyeval import absorber
result = absorber.thickness(position, thicknesslist)
self.assertEqual(thl, result)
def test_absorber_foil(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
position = 45
foillist = '["Ag", "Ag", "Ag", "Ag", "", "Al", "Al", "Al", "Al"]'
thl = ["Ag", "", "Ag", "Ag", "", "Al", "", "", ""]
from nxstools.pyeval import absorber
result = absorber.foil(position, foillist)
self.assertEqual(thl, result)
def test_qbpm_foil(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
position = 25
foildict = '{"Ti": 43, "Ni": 23, "Out": 4}'
foil = "Ni"
from nxstools.pyeval import qbpm
result = qbpm.foil(position, foildict)
self.assertEqual(foil, result)
def test_mssar_env(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
env = {"new": {
'NeXusSelectorDevice': 'nxs/nxsrecselector/dellek',
'ScanFile': ['sdfsdf.nxs', 'sdfsdf.fio'],
'ScanDir': '/tmp'}}
penv = pickle.dumps(env)
value = "/tmp"
varname = "ScanDir"
from nxstools.pyeval import mssar
result = mssar.mssarenv(penv, varname)
self.assertEqual(value, result)
def test_msnsar_env(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
env = {"new": {
'NeXusSelectorDevice': 'nxs/nxsrecselector/dellek',
'ScanFile': ['sdfsdf.nxs', 'sdfsdf.fio'],
'ScanDir': '/tmp'}}
penv = pickle.dumps(env)
values = 'sdfsdf.fio'
varnames = '["ScanFile", 1]'
from nxstools.pyeval import mssar
result = mssar.msnsarenv(penv, varnames)
self.assertEqual(values, result)
def test_lmbd_m2_external_data(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
commonblock = {}
name = "lmbd"
savefilename = "mtest_2342"
saveallimages = 1
filepostfix = "nxs"
filename = "/tmp/scans/mytest_324234.nxs"
modulename = "m2"
sfn1 = "mytest_324234/lmbd/mtest_2342_m2.nxs:" \
"//entry/instrument/detector"
sfn2 = "lmbd/mtest_2342_m2.nxs://entry/instrument/detector"
from nxstools.pyeval import lmbd
fn1 = lmbd.m2_external_data(
commonblock, name, savefilename, saveallimages,
filepostfix, filename, modulename)
self.assertEqual(fn1, sfn1)
fn1 = lmbd.m2_external_data(
commonblock, name, savefilename, False,
filepostfix, filename, modulename)
self.assertEqual(fn1, "")
fn2 = lmbd.m2_external_data(
commonblock, name, savefilename, saveallimages,
filepostfix, "", modulename)
self.assertEqual(fn2, sfn2)
def test_lmbd_external_data(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
name = "lmbd"
savefilename = "mtest_2342"
saveallimages = 1
filepostfix = "nxs"
framesperfile = 40
framenumbers = 20
filename = "/tmp/scans/mytest_324234.nxs"
sfn1 = "mytest_324234/lmbd/mtest_2342.nxs:" \
"//entry/instrument/detector"
sfn2 = "lmbd/mtest_2342.nxs://entry/instrument/detector"
sfn3 = "lmbd/mtest_2342_part00000.nxs://entry/instrument/detector"
sfn4 = "mytest_324234/lmbd/mtest_2342_part00002.nxs:" \
"//entry/instrument/detector"
from nxstools.pyeval import lmbd
fn1 = lmbd.external_data(
commonblock, name, savefilename, saveallimages,
framesperfile, framenumbers,
filepostfix, filename)
self.assertEqual(fn1, sfn1)
fn1 = lmbd.external_data(
commonblock, name, savefilename, False,
framesperfile, framenumbers,
filepostfix, filename)
self.assertEqual(fn1, "")
fn2 = lmbd.external_data(
commonblock, name, savefilename, saveallimages,
framesperfile, framenumbers,
filepostfix, "")
self.assertEqual(fn2, sfn2)
framesperfile = 20
framenumbers = 50
fn2 = lmbd.external_data(
commonblock, name, savefilename, saveallimages,
framesperfile, framenumbers,
filepostfix, "")
self.assertEqual(fn2, sfn2)
framesperfile = 20
framenumbers = 50
tstroot.stepsperfile = 20
tstroot.currentfileid = 1
fn2 = lmbd.external_data(
commonblock, name, savefilename, saveallimages,
framesperfile, framenumbers,
filepostfix, "")
self.assertEqual(fn2, sfn3)
tstroot.stepsperfile = 20
tstroot.currentfileid = 3
filename = "/tmp/scans/mytest_324234.nxs"
fn4 = lmbd.external_data(
commonblock, name, savefilename, saveallimages,
framesperfile, framenumbers,
filepostfix, filename)
self.assertEqual(fn4, sfn4)
def test_pco_postrun(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import pco
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
filestartnum = 20
filedir = "/tmp/current/"
nbframes = 20
filepostfix = ".tif"
fileprefix = "scan213123_"
filestartnum_str = "pco2_filestartnum"
commonblock[filestartnum_str] = 1
sfn1 = "/tmp/current/scan213123_%05d.tif:0:19"
fn1 = pco.postrun(
commonblock, filestartnum, filedir, nbframes,
filepostfix, fileprefix, filestartnum_str)
self.assertEqual(fn1, sfn1)
tstroot.stepsperfile = 20
tstroot.currentfileid = 1
fn1 = pco.postrun(
commonblock, filestartnum, filedir, nbframes,
filepostfix, fileprefix, filestartnum_str)
self.assertEqual(fn1, sfn1)
def test_marccd_postrun(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import marccd
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
savingdirectory = "/tmp/current/"
savingprefix = "scan_213123"
savingpostfix = "tif"
sfn1 = "/tmp/current/scan_213123.tif"
fn1 = marccd.postrun(
commonblock,
savingdirectory,
savingprefix,
savingpostfix)
self.assertEqual(fn1, sfn1)
def test_mythen_postrun(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import mythen
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
fileindex = 20
filedir = "/tmp/current/"
fileprefix = "scan213123"
fileindex_str = "mythen_fileindex"
commonblock[fileindex_str] = 1
sfn1 = "/tmp/current/scan213123_%d.raw:1:19"
fn1 = mythen.postrun(
commonblock,
fileindex,
filedir,
fileprefix,
fileindex_str)
self.assertEqual(fn1, sfn1)
tstroot.stepsperfile = 20
tstroot.currentfileid = 1
self.assertEqual(fn1, sfn1)
fn1 = mythen.postrun(
commonblock,
fileindex,
filedir,
fileprefix,
fileindex_str)
self.assertEqual(fn1, sfn1)
def test_pilatus_postrun(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import pilatus
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
filestartnum = 20
filedir = "/tmp/current/"
nbframes = 20
filepostfix = ".tif"
fileprefix = "scan213123_"
filestartnum_str = "pilatus2_filestartnum"
commonblock[filestartnum_str] = 1
sfn1 = "/tmp/current/scan213123_%05d.tif:0:19"
fn1 = pilatus.postrun(
commonblock, filestartnum, filedir, nbframes,
filepostfix, fileprefix, filestartnum_str)
self.assertEqual(fn1, sfn1)
tstroot.stepsperfile = 20
tstroot.currentfileid = 1
fn1 = pilatus.postrun(
commonblock, filestartnum, filedir, nbframes,
filepostfix, fileprefix, filestartnum_str)
self.assertEqual(fn1, sfn1)
def test_pilatus_mxparameters(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "lmbd"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = '%s_00000.nxs' % (fileprefix)
sfname1 = '%s_00000' % (fileprefix)
ffname1 = '%s/%s' % (path, fname1)
try:
try:
os.makedirs(path)
except FileExistsError:
pass
fl1 = self.fwriter.create_file(ffname1, overwrite=True)
rt = fl1.root()
entry = rt.create_group("scan_1234", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("pilatus2", "NXdetector")
commonblock = {
"lmbd_savefilename": [sfname1],
"lmbd_framenumbers": [30],
"__root__": rt,
}
mxparameters = "# Wavelength 1.03320 A\r\n" \
+ "# Detector_distance 0.32200 m\r\n" \
+ "# Beam_xy (1261.00, 1242.00) pixels\r\n" \
+ "# Filter_transmission 0.1000\r\n" \
+ "# Start_angle 204.9240 deg.\r\n" \
+ "# Angle_increment 0.1000 deg.\r\n# Phi 404.0470 deg.\r"
name = "pilatus2"
entryname = "scan_1234"
from nxstools.pyeval import pilatus
result = pilatus.mxparameters_cb(
commonblock,
mxparameters, name,
entryname,
insname="instrument"
)
self.assertEqual(mxparameters, result)
length = det.open("wavelength")
dist = det.open("distance")
beamx = det.open("beam_center_x")
beamy = det.open("beam_center_y")
self.assertEqual(length[...][0], 1.0332)
self.assertEqual(dist[...][0], 0.322)
self.assertEqual(beamx[...][0], 1261.)
self.assertEqual(beamy[...][0], 1242.)
det.close()
ins.close()
entry.close()
fl1.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
def test_dcm_unitcalibration(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
dcmdevice = "ttestp09/testts/t1r228"
braggdevice = "ttestp09/testts/t2r228"
value = 2187.3755
try:
tsv1 = TestServerSetUp.TestServerSetUp(
dcmdevice, "MYTESTS1")
tsv1.setUp()
db = PyTango.Database()
db.put_device_property(dcmdevice,
{'BraggDevice': [braggdevice]})
tsv1.dp.Init()
tsv2 = TestServerSetUp.TestServerSetUp(
braggdevice, "MYTESTS2")
tsv2.setUp()
from nxstools.pyeval import dcm
result = dcm.unitcalibration(dcmdevice)
self.assertEqual(value, result)
finally:
if tsv1:
tsv1.tearDown()
if tsv2:
tsv2.tearDown()
def test_dcm_reflection(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
dcmdevice = "ttestp09/testts/t1r228"
version = '11'
try:
tsv1 = TestServerSetUp.TestServerSetUp(
dcmdevice, "MYTESTS1")
tsv1.setUp()
db = PyTango.Database()
db.put_device_property(dcmdevice,
{'Version': [version]})
tsv1.dp.Init()
from nxstools.pyeval import dcm
tsv1.dp.crystal = 1
result = dcm.reflection(dcmdevice)
self.assertEqual([2, 2, 0], result)
tsv1.dp.crystal = 2
result = dcm.reflection(dcmdevice)
self.assertEqual([1, 1, 1], result)
version = "8"
db.put_device_property(dcmdevice,
{'Version': [version]})
tsv1.dp.Init()
from nxstools.pyeval import dcm
tsv1.dp.Crystal = 1
result = dcm.reflection(dcmdevice)
self.assertEqual([3, 1, 1], result)
tsv1.dp.Crystal = 2
result = dcm.reflection(dcmdevice)
self.assertEqual([1, 1, 1], result)
finally:
if tsv1:
tsv1.tearDown()
def test_dcm_crystal(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
dcmdevice = "ttestp09/testts/t1r228"
try:
tsv1 = TestServerSetUp.TestServerSetUp(
dcmdevice, "MYTESTS1")
tsv1.setUp()
from nxstools.pyeval import dcm
value = 1
tsv1.dp.crystal = value
result = dcm.crystal(dcmdevice)
self.assertEqual(value, result)
value = 2
tsv1.dp.crystal = value
result = dcm.crystal(dcmdevice)
self.assertEqual(value, result)
finally:
if tsv1:
tsv1.tearDown()
def test_limaccd_postrun(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import limaccd
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
filestartnum_str = "andor_saving_next_number"
commonblock[filestartnum_str] = 1
saving_next_number = 20
saving_directory = "/tmp/current/"
saving_suffix = ".tif"
acq_nb_frames = 20
saving_format = "_%05d"
saving_prefix = "scan213123"
sfn1 = "/tmp/current/scan213123_%05d.tif:0:19"
fn1 = limaccd.postrun(
commonblock,
saving_next_number, saving_directory, saving_suffix,
acq_nb_frames, saving_format, saving_prefix,
"andor_saving_next_number")
self.assertEqual(fn1, sfn1)
tstroot.stepsperfile = 20
tstroot.currentfileid = 1
fn1 = limaccd.postrun(
commonblock,
saving_next_number, saving_directory, saving_suffix,
acq_nb_frames, saving_format, saving_prefix,
"andor_saving_next_number")
self.assertEqual(fn1, sfn1)
def test_pe_fileindex_cb(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import pe
commonblock = {}
sfn1 = 4
fn1 = pe.fileindex_cb(
commonblock, "pe_fileindex", sfn1)
self.assertEqual(fn1, sfn1 - 1)
self.assertEqual(len(commonblock), 1)
self.assertTrue("pe_fileindex" in commonblock)
self.assertEqual(commonblock["pe_fileindex"], sfn1)
def test_pe_postrun(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
from nxstools.pyeval import pe
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
fileindex_str = "pe_fileindex"
commonblock[fileindex_str] = 1
fileindex = 20
outputdirectory = "/tmp/current/"
filepattern = "scan213123"
filename = ".tif"
sfn1 = "/tmp/current/scan213123-%05d.tif:0:19"
fn1 = pe.postrun(
commonblock,
outputdirectory,
filepattern,
filename,
fileindex,
"pe_fileindex")
self.assertEqual(fn1, sfn1)
tstroot.stepsperfile = 20
tstroot.currentfileid = 1
fn1 = pe.postrun(
commonblock,
outputdirectory,
filepattern,
filename,
fileindex,
"pe_fileindex")
self.assertEqual(fn1, sfn1)
def test_tangovimba(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
name = "vimba"
fileprefix = "scan213123"
filepostfix = "nx"
filestartnum = 2
filename = "/tmp/scans/mytest_324234.nxs"
sfn1 = "mytest_324234/vimba/scan213123_000002.nx:" \
"//entry/instrument/detector"
from nxstools.pyeval import tangovimba
fn1 = tangovimba.external_data(
commonblock,
name,
fileprefix,
filepostfix,
filestartnum,
filename)
self.assertEqual(fn1, sfn1)
def test_dalsa(self):
""" test
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
tstroot = TstRoot2()
commonblock = {"__root__": tstroot}
name = "dalsa"
fileprefix = "scan213123"
filepostfix = "nx"
filestartnum = 2
filename = "/tmp/scans/mytest_324234.nxs"
sfn1 = "mytest_324234/dalsa/scan213123_000001.nx:" \
"//entry/instrument/detector"
from nxstools.pyeval import dalsa
fn1 = dalsa.external_data(
commonblock,
name,
fileprefix,
filepostfix,
filestartnum,
filename)
self.assertEqual(fn1, sfn1)
    def test_eigerdectris_triggermode_splitmode(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "eiger2"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['testscan_data_%06i.h5' % i for i in range(1, 4)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [14, 14, 2]
devicename = "ttestp09/testts/t1r228"
vl = [[[self._rnd.randint(1, 1600) for _ in range(2)]
for _ in range(1)]
for _ in range(30)]
try:
tsv1 = TestServerSetUp.TestServerSetUp(
devicename, "MYTESTS1")
tsv1.setUp()
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
dt = entry.create_group("data", "NXdata")
intimage = dt.create_field(
"data", "uint32",
[framenumbers[i], 1, 2], [1, 1, 2])
vv = [[[vl[i * framenumbers[0] + nn][jj][ii]
for ii in range(2)]
for jj in range(1)]
for nn in range(framenumbers[i])]
intimage[:, :, :] = vv
intimage.close()
dt.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
dt = entry.create_group("data", "NXdata")
col = det.create_group("collection", "NXcollection")
commonblock = {
"eiger2_stepindex": [30],
"__root__": rt,
}
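            # in splitmode, triggermode_cb is expected to link each external
            # testscan_data_%06i.h5 file as collection/data_%06i and as
            # data/eiger2_%06i, which the loop below reads back frame by frame.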
triggermode = "splitmode"
name = "eiger2"
nbimages = 30
hostname = "localhost:10000"
device = devicename
stepindex_str = "eiger2_stepindex"
insname = "instrument"
eigerdectris_str = "TestServer"
eigerfilewriter_str = "TestServer"
from nxstools.pyeval import eigerdectris
result = eigerdectris.triggermode_cb(
commonblock,
name,
triggermode,
nbimages,
hostname,
device,
filename,
stepindex_str,
entryname,
insname,
eigerdectris_str,
eigerfilewriter_str)
fl.flush()
self.assertEqual(result, "splitmode")
for i in range(3):
images = col.open("data_%06i" % (i + 1))
rw = images.read()
for j in range(framenumbers[i]):
self.myAssertImage(rw[j], vl[j + framenumbers[0] * i])
images = dt.open("eiger2_%06i" % (i + 1))
rw = images.read()
for j in range(framenumbers[i]):
self.myAssertImage(rw[j], vl[j + framenumbers[0] * i])
intimage.close()
dt.close()
entry.close()
fl.close()
finally:
if tsv1:
tsv1.tearDown()
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_lambdavdsnm_triggermode_cb(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "lmbd"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_00000_m%02d.nxs' % (fileprefix, i) for i in range(1, 4)]
sfname1 = '%s_00000' % fileprefix
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [10, 10, 10]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint32",
[framenumbers[i], 10, 20], [1, 10, 20])
vv = [[[vl[i * framenumbers[0] + nn][jj][ii]
for ii in range(20)]
for jj in range(10)]
for nn in range(framenumbers[i])]
intimage[:, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"__root__": rt,
}
triggermode = 0
translations = '{"m01":[0,0,0], "m02":[0,12,0], "m03":[0,24,0]}'
saveallimages = True
height = 10
width = 20
opmode = 24
filepostfix = "nxs"
framenumbers = 10
savefilename = sfname1
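            # translations gives each lambda module its pixel offset in the
            # combined image; the read-back below checks rows 0-9, 12-21 and
            # 24-33 against the three module files.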
from nxstools.pyeval import lambdavds
result = lambdavds.nm_triggermode_cb(
commonblock,
"lmbd",
triggermode,
translations,
saveallimages,
filepostfix,
framenumbers,
height,
width,
opmode,
savefilename,
filename,
entryname,
"instrument")
self.assertEqual(triggermode, result)
images = det.open("data")
fl.flush()
rw = images.read()
# print(rw)
for i in range(10):
self.myAssertImage(rw[i, 0:10, 0:20], vl[i])
for i in range(10):
self.myAssertImage(rw[i, 12:22, 0:20], vl[i + 10])
for i in range(10):
self.myAssertImage(rw[i, 24:34, 0:20], vl[i + 20])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_dalsavds_triggermode_nosave(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
commonblock = {}
name = "dalsa"
triggermode = 0
filepostfix = "nxs"
fileprefix = "scan213123"
filepostfix = "nx"
filestartnum = 2
filedir = "/tmp/scans/"
filename = "mytest_324234.nxs"
entryname = "entry123"
insname = "instrument"
filesaving = False
triggermode = "splitmode"
framespernxfile = 43
pixelformat = "Mono8"
height = 2344
width = 2143
acquisitionmode = "SingleFrame"
acquisitionframecount = 43
from nxstools.pyeval import dalsavds
result = dalsavds.triggermode(
commonblock,
name,
filedir,
fileprefix,
filepostfix,
filestartnum,
filesaving,
triggermode,
framespernxfile,
pixelformat,
height,
width,
acquisitionmode,
acquisitionframecount,
"dalsa_filestartnum",
"dalsa_nrexposedframes",
filename,
entryname,
insname)
self.assertEqual(triggermode, result)
def test_dalsavds_triggermode_singleframe(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
filepostfix = "nx"
filestartnum = 0
filedir = "/tmp/scans/"
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "dalsa"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_%05d.nx' % (fileprefix, i)
for i in range(filestartnum, 30 + filestartnum)]
# sfname1 = ['%s_%05d' % (fileprefix, i)
# for i in range(filestartnum, 30 + filestartnum)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint16", [1, 10, 20], [1, 10, 20])
vv = [[[vl[i][jj][ii] for ii in range(20)]
for jj in range(10)]]
intimage[0, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"dalsa_filestartnum": list(range(1, 31)),
"dalsa_nrexposedframes": list(range(1, 31)),
"__root__": rt,
}
name = "dalsa"
insname = "instrument"
filesaving = True
triggermode = "ExtTrigger"
framespernxfile = 40
pixelformat = "Mono16"
height = 10
width = 20
acquisitionmode = "SingleFrame"
acquisitionframecount = 30
from nxstools.pyeval import dalsavds
result = dalsavds.triggermode(
commonblock,
name,
filedir,
fileprefix,
filepostfix,
filestartnum,
filesaving,
triggermode,
framespernxfile,
pixelformat,
height,
width,
acquisitionmode,
acquisitionframecount,
"dalsa_filestartnum",
"dalsa_nrexposedframes",
filename,
entryname,
insname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_dalsavds_triggermode_multiframe(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
filepostfix = "nx"
filestartnum = 0
filedir = "/tmp/scans/"
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "dalsa"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_%05d.nx' % (fileprefix, i)
for i in range(3)]
# sfname1 = ['%s_%05d' % (fileprefix, i)
# for i in range(filestartnum, 30 + filestartnum)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [10, 10, 10]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint16",
[framenumbers[i], 10, 20], [framenumbers[i], 10, 20])
vv = [[[vl[i * framenumbers[0] + nn][jj][ii]
for ii in range(20)]
for jj in range(10)]
for nn in range(framenumbers[i])]
intimage[:, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"dalsa_filestartnum": [1, 2, 3],
"dalsa_nrexposedframes": [10, 10, 10],
"__root__": rt,
}
name = "dalsa"
insname = "instrument"
filesaving = True
triggermode = "ExtTrigger"
framespernxfile = 40
pixelformat = "Mono16"
height = 10
width = 20
acquisitionmode = "MultiFrame"
acquisitionframecount = 10
from nxstools.pyeval import dalsavds
result = dalsavds.triggermode(
commonblock,
name,
filedir,
fileprefix,
filepostfix,
filestartnum,
filesaving,
triggermode,
framespernxfile,
pixelformat,
height,
width,
acquisitionmode,
acquisitionframecount,
"dalsa_filestartnum",
"dalsa_nrexposedframes",
filename,
entryname,
insname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_dalsavds_triggermode_multiframe_split(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
filepostfix = "nx"
filestartnum = 0
filedir = "/tmp/scans/"
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "dalsa"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_%05d.nx' % (fileprefix, i)
for i in range(4)]
# sfname1 = ['%s_%05d' % (fileprefix, i)
# for i in range(filestartnum, 30 + filestartnum)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [10, 5, 10, 5]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
index = 0
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint16",
[framenumbers[i], 10, 20], [framenumbers[i], 10, 20])
vv = [[[vl[index + nn][jj][ii]
for ii in range(20)]
for jj in range(10)]
for nn in range(framenumbers[i])]
index += framenumbers[i]
intimage[:, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"dalsa_filestartnum": [2, 4],
"dalsa_nrexposedframes": [15, 15],
"__root__": rt,
}
name = "dalsa"
insname = "instrument"
filesaving = True
triggermode = "ExtTrigger"
framespernxfile = 10
pixelformat = "Mono16"
height = 10
width = 20
acquisitionmode = "MultiFrame"
acquisitionframecount = 15
from nxstools.pyeval import dalsavds
result = dalsavds.triggermode(
commonblock,
name,
filedir,
fileprefix,
filepostfix,
filestartnum,
filesaving,
triggermode,
framespernxfile,
pixelformat,
height,
width,
acquisitionmode,
acquisitionframecount,
"dalsa_filestartnum",
"dalsa_nrexposedframes",
filename,
entryname,
insname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_dalsavds_triggermode_continuous(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
filepostfix = "nx"
filestartnum = 0
filedir = "/tmp/scans/"
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "dalsa"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_%05d.nx' % (fileprefix, i)
for i in range(1)]
# sfname1 = ['%s_%05d' % (fileprefix, i)
# for i in range(filestartnum, 30 + filestartnum)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [30]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint16",
[framenumbers[i], 10, 20], [framenumbers[i], 10, 20])
vv = [[[vl[i * framenumbers[0] + nn][jj][ii]
for ii in range(20)]
for jj in range(10)]
for nn in range(framenumbers[i])]
intimage[:, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"dalsa_filestartnum": [1],
"dalsa_nrexposedframes": [30],
"__root__": rt,
}
name = "dalsa"
insname = "instrument"
filesaving = True
triggermode = "ExtTrigger"
framespernxfile = 40
pixelformat = "Mono16"
height = 10
width = 20
acquisitionmode = "Continuous"
acquisitionframecount = 0
from nxstools.pyeval import dalsavds
result = dalsavds.triggermode(
commonblock,
name,
filedir,
fileprefix,
filepostfix,
filestartnum,
filesaving,
triggermode,
framespernxfile,
pixelformat,
height,
width,
acquisitionmode,
acquisitionframecount,
"dalsa_filestartnum",
"dalsa_nrexposedframes",
filename,
entryname,
insname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
def test_dalsavds_triggermode_continuous_split(self):
"""
"""
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
if not self.fwriter.is_vds_supported():
print("Skip the test: VDS not supported")
return
filepostfix = "nx"
filestartnum = 0
filedir = "/tmp/scans/"
mfileprefix = "%s%s" % (self.__class__.__name__, fun)
fileprefix = "%s%s" % (self.__class__.__name__, fun)
scanid = 12345
name = "dalsa"
filename = "%s_%s.nxs" % (mfileprefix, scanid)
mainpath = "%s_%s" % (mfileprefix, scanid)
path = "%s_%s/%s" % (mfileprefix, scanid, name)
self._fname = filename
fname1 = ['%s_%05d.nx' % (fileprefix, i)
for i in range(3)]
# sfname1 = ['%s_%05d' % (fileprefix, i)
# for i in range(filestartnum, 30 + filestartnum)]
ffname1 = ['%s/%s' % (path, fn) for fn in fname1]
framenumbers = [14, 14, 2]
vl = [[[self._rnd.randint(1, 1600) for _ in range(20)]
for _ in range(10)]
for _ in range(30)]
try:
try:
os.makedirs(path)
except FileExistsError:
pass
for i, fn in enumerate(ffname1):
fl1 = self.fwriter.create_file(fn, overwrite=True)
rt = fl1.root()
entry = rt.create_group("entry", "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group("detector", "NXdetector")
intimage = det.create_field(
"data", "uint16",
[framenumbers[i], 10, 20], [framenumbers[i], 10, 20])
vv = [[[vl[i * framenumbers[0] + nn][jj][ii]
for ii in range(20)]
for jj in range(10)]
for nn in range(framenumbers[i])]
intimage[:, :, :] = vv
intimage.close()
det.close()
ins.close()
entry.close()
fl1.close()
entryname = "entry123"
fl = self.fwriter.create_file(self._fname, overwrite=True)
rt = fl.root()
entry = rt.create_group(entryname, "NXentry")
ins = entry.create_group("instrument", "NXinstrument")
det = ins.create_group(name, "NXdetector")
commonblock = {
"dalsa_filestartnum": [3],
"dalsa_nrexposedframes": [30],
"__root__": rt,
}
name = "dalsa"
insname = "instrument"
filesaving = True
triggermode = "ExtTrigger"
framespernxfile = 14
pixelformat = "Mono16"
height = 10
width = 20
acquisitionmode = "Continuous"
acquisitionframecount = 0
from nxstools.pyeval import dalsavds
result = dalsavds.triggermode(
commonblock,
name,
filedir,
fileprefix,
filepostfix,
filestartnum,
filesaving,
triggermode,
framespernxfile,
pixelformat,
height,
width,
acquisitionmode,
acquisitionframecount,
"dalsa_filestartnum",
"dalsa_nrexposedframes",
filename,
entryname,
insname)
self.assertEqual(triggermode, result)
images = det.open("data")
rw = images.read()
for i in range(30):
self.myAssertImage(rw[i], vl[i])
intimage.close()
det.close()
ins.close()
entry.close()
fl.close()
finally:
shutil.rmtree(mainpath,
ignore_errors=False, onerror=None)
os.remove(self._fname)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
jensengroup/propka-3.1
|
tests/test_streamio.py
|
1
|
3842
|
"""Tests for PROPKA stream io"""
import logging
from pathlib import Path
from io import StringIO
import pytest
from propka.parameters import Parameters
from propka.molecular_container import MolecularContainer
from propka.input import read_parameter_file, read_molecule_file
from propka.lib import loadOptions
from .test_basic_regression import get_test_dirs, compare_output
_LOGGER = logging.getLogger(__name__)
def get_paths(pdb):
"""Helper function to get the path to the input and reference files"""
path_dict = get_test_dirs()
ref_path = path_dict["results"] / ("{0:s}.dat".format(pdb))
pdb_path = path_dict["pdbs"] / ("{0:s}.pdb".format(pdb))
return ref_path.resolve(), pdb_path.resolve()
def run_propka_stream(options, input_file, filename):
"""Run PROPKA software.
Args:
options: list of PROPKA options
input_file: file-like PDB object
filename: filename for the file-like PDB object
"""
options += [filename]
args = loadOptions(options)
parameters = read_parameter_file(args.parameters, Parameters())
molecule = MolecularContainer(parameters, args)
molecule = read_molecule_file(filename, molecule, stream=input_file)
molecule.calculate_pka()
molecule.write_pka()
if args.generate_propka_input:
molecule.write_propka()
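# A minimal usage sketch of the helper above (illustrative, not part of the test
# suite; the path and options shown are hypothetical):
#
#     with open("pdbs/1FTJ-Chain-A.pdb") as pdb_stream:
#         run_propka_stream(["--quiet"], pdb_stream, filename="1FTJ-Chain-A.pdb")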
@pytest.mark.parametrize("pdb, options", [
pytest.param("1FTJ-Chain-A", [], id="1FTJ-Chain-A: no options"),
pytest.param('3SGB-subset', [
"--titrate_only",
"E:17,E:18,E:19,E:29,E:44,E:45,E:46,E:118,E:119,E:120,E:139"],
id="3SGB: --titrate_only"),
pytest.param('1HPX-warn', ['--quiet'], id="1HPX-warn: --quiet"),
])
def test_textio_filestream(tmpdir, pdb, options):
"""Basic regression test using TextIO streams for the input PDB file"""
# Get the relevant paths
ref_path, pdb_path = get_paths(pdb)
filename = f"{pdb}.pdb"
filestream = open(pdb_path, 'r')
with tmpdir.as_cwd():
run_propka_stream(options, filestream, filename)
compare_output(pdb, Path.cwd(), ref_path)
filestream.close()
@pytest.mark.parametrize("pdb, options", [
pytest.param("1FTJ-Chain-A", [], id="1FTJ-Chain-A: no options"),
pytest.param('3SGB-subset', [
"--titrate_only",
"E:17,E:18,E:19,E:29,E:44,E:45,E:46,E:118,E:119,E:120,E:139"],
id="3SGB: --titrate_only"),
pytest.param('1HPX-warn', ['--quiet'], id="1HPX-warn: --quiet"),
])
def test_stringio_filestream(tmpdir, pdb, options):
"""Basic regression test using StringIO streams for the input PDB file"""
# Get the relevant paths
ref_path, pdb_path = get_paths(pdb)
filename = f"{pdb}.pdb"
with open(pdb_path, 'r') as writer:
filestream = StringIO(writer.read())
with tmpdir.as_cwd():
run_propka_stream(options, filestream, filename)
compare_output(pdb, Path.cwd(), ref_path)
filestream.close()
def test_valuerror_nofiletype():
"""Tests for raised ValueError when an unknown filename is passed to
read_molecule_file"""
pdb = "1FTJ-Chain-A"
options = []
ref_path, pdb_path = get_paths(pdb)
with open(pdb_path, 'r') as writer:
filestream = StringIO(writer.read())
errmsg = "Unknown input file type"
with pytest.raises(ValueError, match=errmsg):
run_propka_stream(options, filestream, filename="test.dat")
def test_valuerror_notpdb():
"""Tests for raised ValueError when a stream object that isn't a PDB
is passed to read_molecule_file"""
pdb = "1FTJ-Chain-A"
options = []
ref_path, pdb_path = get_paths(pdb)
filestream = StringIO()
errmsg = "The pdb file does not seem to contain any "
with pytest.raises(ValueError, match=errmsg):
run_propka_stream(options, filestream, filename="test.pdb")
|
lgpl-2.1
|
twilsonco/CPGobbler
|
kf.py
|
1
|
12472
|
#!/usr/bin/env python
# kf.py - Implementation of a Python interface to read and write KF files.
# This is an implementation that uses the KF utilities to do the actual work,
# so you need a working ADF installation.
#
# Copyright (C) 2006-2008 by Scientific Computing and Modelling NV.
# For support, contact SCM support (support at scm . com)
#
# This file is part of the ADF software
# For more information, see <http://www.scm.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# SCM owns the intellectual property right for this file and reserves the
# right to distribute it under a license other than LGPL
try:
import numpy
except:
numpy = None
#--------------
# Exceptions
#--------------
class PyADFException:
def __init__(self, message = 'A PyADFException occurred.'): self._msg = message
def message(self): return self._msg
#--------------
# KF data types
#--------------
IntegerType = 1
RealType = 2
CharacterType = 3
LogicalType = 4
class KFType:
def stringForData(self, data) :
import string
try:
s = string.join( map(str, data), ' ')
except:
raise PyADFException('Failed to convert data to string')
return s
def dataForString(self, str, numElements):
# Break string up into strings for each element
import re
elements = []
elementCount = 0
for match in re.finditer( self.regExForDataElement(), str ):
elements.append( match.group(0) )
elementCount = elementCount + 1
if numElements <= elementCount: break
# Convert elements to appropriate python types
convertFunc = self.stringToDataConversionFunc()
if convertFunc: elements = map( convertFunc, elements )
return elements
def formatData(self, data, nperline, fmt) :
s = ""
count = 0
for r in data:
count = count + 1
if count > nperline:
s = s + "\n"
count = 1
str = fmt % r
s = s + str
return s
def len (self, d) : return len(d)
class KFIntegerType (KFType):
def typeEnum(self) : return IntegerType
def regExForDataElement(self) : return r'\s*[+-]?\d+\s*'
def stringToDataConversionFunc(self) : return int
def stringForData(self, data) : return self.formatData(data, 8, "%10i")
class KFRealType (KFType):
def typeEnum(self) : return RealType
def regExForDataElement(self) : return r'\s*[+-]?[\d\.+-eE]+\s*'
def stringToDataConversionFunc(self) : return float
def stringForData(self, data) : return self.formatData(data, 3, "%26.18e")
class KFCharacterType (KFType):
def typeEnum(self) : return CharacterType
def regExForDataElement(self) : return r'\n?.'
def stringToDataConversionFunc(self) : return None
def len(self,d) : return 160*len(d)
def stringForData(self, data) :
s = ""
for str in data:
longstr = str.ljust(160)
s1 = longstr[0:79]
s2 = longstr[80:159]
s = s + s1 + "\n" + s2 + "\n"
return s
def dataForString(self, str, numElements):
import string
s = []
mystr = str
mystr = string.replace (mystr, "\n", "")
for n in range(numElements/160):
s.append (mystr[0:159].rstrip())
mystr = mystr[160:]
return s
class KFLogicalType (KFType):
def stringForData(self, data):
count = 0
s = ""
for l in data:
if count == 80:
s = s + "\n"
count = 0
count = count + 1
if l: s = s + "T"
else: s = s + "F"
return s
def typeEnum(self) : return LogicalType
def regExForDataElement(self) : return r'[TF]'
def stringToDataConversionFunc(self) : return self.stringToLogical
def stringToLogical(self, str) :
return str == "T"
def KFTypeForEnum( enum ):
"Factory for creating KF Type instances"
t = None
if enum == 1 : t = KFIntegerType()
elif enum == 2 : t = KFRealType()
elif enum == 3 : t = KFCharacterType()
elif enum == 4 : t = KFLogicalType()
else : raise PyADFException('Invalid type in KFTypeForEnum')
return t
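# Usage sketch (illustrative values): the factory maps the KF type enums above
# to handler instances, e.g.
#
#     handler = KFTypeForEnum(RealType)  # a KFRealType instance
#     text = handler.stringForData([1.0, 2.5])  # formatted as dmpkf/udmpkf expects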
#----------------------
# KF file wrapper class
#----------------------
class kffile:
"""
A class wrapper for an ADF KF file. Allows reading from and writing
to binary KF files from python. Makes use of the ADF utilities
dmpkf, udmpkf, and cpkf.
"""
def __init__(self, fileName ):
import os
self._fileName = fileName
if not 'ADFBIN' in os.environ :
self._kfpath = ''
else :
self._kfpath = os.environ['ADFBIN']
def delete (self) :
import os
os.remove (self._fileName)
def close (self) :
# for compatibility with binary version
pass
def _write( self, section, variable, data, dataEnum ):
"""
Sets the data for a particular variable in the kf file.
"""
d = data
if isinstance(d,str): d = [d]
# flatten numpy arrays so they can be serialised like plain sequences
if numpy is not None and isinstance(d, numpy.ndarray): d = d.flatten()
import operator
if not operator.isSequenceType(d): d = [d]
typ = KFTypeForEnum(dataEnum)
l = typ.len(d)
varString = '%s\n%s\n%10d%10d%10d\n%s\n' % (section, variable, l, l, dataEnum, typ.stringForData(d))
self._storeString( varString, section, variable )
def writereals( self, sec, var, data ):
self._write( sec, var, data, RealType )
def writeints( self, sec, var, data ):
self._write( sec, var, data, IntegerType )
def writelogicals( self, sec, var, data ):
self._write( sec, var, data, LogicalType )
def writechars( self, sec, var, data ):
self._write( sec, var, data, CharacterType )
def read( self, section, variable ):
"""
Extract the data for a given section and variable from
the kf file, and return it in a python data container.
If no data is found, None is returned.
"""
fileString = self.stringData(section, variable)
# Locate the section and variable, and get the data parameters
import re
searchString = r'^' + section + r'\s*\n' + variable + r'\s*\n((\s*\d+){3})\s*\n'
match = re.search( searchString, fileString, re.M )
if not match: return None
intStringArray = re.split( r'\s+', match.group(1) )
intStringArray.pop(0)
memory, numElements, dataEnum = map( int, intStringArray )
dataStartIndex = match.end()
# Extract and return data, converting to numpy array if numpy is present
typ = KFTypeForEnum(dataEnum)
result = typ.dataForString(fileString[dataStartIndex:], numElements)
# if result == None: return None
if numpy: return numpy.array(result)
return result
def stringData(self, section, variable):
"""
Returns an ascii dump of the requested section and variable from the kf file (via dmpkf).
"""
import os
import subprocess
dumpCmd = os.path.join(self._kfpath, 'dmpkf')
dumpCmd = dumpCmd + ' ' + self._fileName + ' "' + section + '%' + variable + '"'
# print "KF command: %s" % dumpCmd
if os.name != 'nt':
dumpCmd += ' 2>/dev/null'
# outPipe = os.popen(dumpCmd)
# str = outPipe.read()
str = subprocess.Popen(dumpCmd, shell=True, stdout=subprocess.PIPE).stdout.read()
# outPipe.close()
return str
def _storeString(self, str, sec, var):
"""
Copies the passed string into the binary kf file.
Assumes udmpkf can parse the string.
"""
import os
# Undump string data with udmpkf
import tempfile, os, popen2
path = tempfile.mktemp()
udumpCmd = os.path.join(self._kfpath, 'udmpkf')
tochild = os.popen(udumpCmd + ' ' + path + ' 2>/dev/null', 'w')
tochild.write(str)
tochild.close()
# Work around start script bug: __0 files only renamed in current directory
if os.path.isfile(path+'__0'):
os.rename(path+'__0', path)
# Use cpkf to merge the two binary files
copyCmd = os.path.join(self._kfpath, 'cpkf')
copyCmd = copyCmd + ' ' + path + ' ' + self._fileName + " '" + sec + '%' + var + "'" + \
' 2> /dev/null'
os.system(copyCmd)
# Close temporary file
os.remove(path)
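# Usage sketch for the wrapper above (assumes a working ADF installation; the
# file path and section/variable names are hypothetical):
#
#     kf = kffile('/tmp/TAPE21')
#     kf.writereals('Geometry', 'xyz', [0.0, 0.0, 1.4])
#     coords = kf.read('Geometry', 'xyz')
#     kf.close()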
#---------------
# Unit tests
#---------------
from unittest import *
class KFFileTests (TestCase):
"""
Unit tests for the KFFile class.
"""
def setUp(self):
import shutil, tempfile, os.path
self.kfPath = os.path.join( tempfile.gettempdir(), 'KFFileTests_TAPE21' )
self.kf = kffile(self.kfPath)
def tearDown(self):
import os
if os.path.isfile(self.kfPath): os.remove (self.kfPath)
def testLogicals(self):
lint = 1
self.kf.writelogicals ('Logicals', 'scalar true', lint)
linf = 0
self.kf.writelogicals ('Logicals', 'scalar false', linf)
lout = self.kf.read ('Logicals', 'scalar true')
self.assertEqual (lout[0], lint)
self.assertEqual (len(lout), 1)
lout = self.kf.read ('Logicals', 'scalar false')
self.assertEqual (lout[0], linf)
self.assertEqual (len(lout), 1)
lin = [0,1,0,1]
self.kf.writelogicals ('Logicals', 'list', lin)
lout = self.kf.read ('Logicals', 'list')
self.assertEqual (list(lout), lin)
def testReals(self):
rin = 3.14
self.kf.writereals ('Reals', 'scalar', rin)
rout = self.kf.read ('Reals', 'scalar')
self.assertEqual ([rin], rout)
rin = [0.0,3.14,-1.0e-16,3e24]
self.kf.writereals ('Reals', 'list', rin)
rout = self.kf.read ('Reals', 'list')
self.assertEqual (rin, list(rout))
def testChars(self):
cin = "This is a long character string to test the pykf stuff, will it work or will it not? The string certainly is long."
self.kf.writechars ('String', 'scalar', cin)
cout = self.kf.read ('String', 'scalar')
self.assertEqual ([cin], cout)
cin = ["String 1","String 2", "Yet another String"]
self.kf.writechars ('String', 'list', cin)
cout = self.kf.read ('String', 'list')
self.assertEqual (cin, list(cout))
def testInts(self):
iin = 3
self.kf.writeints ('Ints', 'scalar', iin)
iout = self.kf.read ('Ints', 'scalar')
self.assertEqual ([iin], iout)
iin = [0,1,2,3,4,5,-123]
self.kf.writeints ('Ints', 'list', iin)
iout = self.kf.read ('Ints', 'list')
self.assertEqual (iin, list(iout))
def testNone(self):
res = self.kf.read('Blurb', 'jojo')
self.assertEqual (res, None)
def testCasesensitive(self):
i = 0
self.kf.writeints ('Names', 'Aap', i)
ii = self.kf.read ('Names', 'aap')
self.assertEqual (ii, None)
ii = self.kf.read ('names', 'Aap')
self.assertEqual (ii, None)
ii = self.kf.read ('Names', 'Aap')
self.assertEqual (list(ii), [i])
def testDeleteFile(self):
import os
self.kf.writechars ('Test', 'string', "Hello World")
self.failUnless (os.path.isfile(self.kfPath))
self.kf.delete()
self.failIf (os.path.isfile(self.kfPath))
def runTests():
allTestsSuite = TestSuite( [ makeSuite( KFFileTests, 'test' ) ] )
runner = TextTestRunner()
runner.run(allTestsSuite)
if __name__ == "__main__": runTests()
|
gpl-3.0
|
jarn0ld/gnuradio
|
grc/python/Generator.py
|
5
|
17769
|
"""
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import sys
import subprocess
import tempfile
import shlex
import codecs
import re # for shlex_quote
from distutils.spawn import find_executable
from Cheetah.Template import Template
from .. gui import Messages
from .. base import ParseXML
from .. base import odict
from .. base.Constants import BLOCK_FLAG_NEED_QT_GUI
from . Constants import TOP_BLOCK_FILE_MODE, FLOW_GRAPH_TEMPLATE, \
XTERM_EXECUTABLE, HIER_BLOCK_FILE_MODE, HIER_BLOCKS_LIB_DIR, BLOCK_DTD
from . import expr_utils
class Generator(object):
"""Adaptor for various generators (uses generate_options)"""
def __init__(self, flow_graph, file_path):
"""
Initialize the generator object.
Determine the file to generate.
Args:
flow_graph: the flow graph object
file_path: the path to the grc file
"""
self._generate_options = flow_graph.get_option('generate_options')
if self._generate_options == 'hb':
generator_cls = HierBlockGenerator
elif self._generate_options == 'hb_qt_gui':
generator_cls = QtHierBlockGenerator
else:
generator_cls = TopBlockGenerator
self._generator = generator_cls(flow_graph, file_path)
def get_generate_options(self):
return self._generate_options
def __getattr__(self, item):
"""get all other attrib from actual generator object"""
return getattr(self._generator, item)
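# Usage sketch (hypothetical objects): the adaptor above picks the concrete
# generator from the flow graph's generate_options and forwards everything
# else to it, e.g.
#
#     gen = Generator(flow_graph, '/path/to/top_block.grc')
#     gen.write()  # writes the python file (plus block XML for hier blocks)
#     proc = gen.get_popen()  # optionally run the generated flow graph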
class TopBlockGenerator(object):
def __init__(self, flow_graph, file_path):
"""
Initialize the top block generator object.
Args:
flow_graph: the flow graph object
file_path: the path to write the file to
"""
self._flow_graph = flow_graph
self._generate_options = self._flow_graph.get_option('generate_options')
self._mode = TOP_BLOCK_FILE_MODE
dirname = self._dirname = os.path.dirname(file_path)
# handle the case where the directory is read-only
# in this case, use the system's temp directory
if not os.access(dirname, os.W_OK):
dirname = tempfile.gettempdir()
filename = self._flow_graph.get_option('id') + '.py'
self._file_path = os.path.join(dirname, filename)
def get_file_path(self):
return self._file_path
def write(self):
"""generate output and write it to files"""
# do throttle warning
throttling_blocks = filter(lambda b: b.throtteling(), self._flow_graph.get_enabled_blocks())
if not throttling_blocks and not self._generate_options.startswith('hb'):
Messages.send_warning("This flow graph may not have flow control: "
"no audio or RF hardware blocks found. "
"Add a Misc->Throttle block to your flow "
"graph to avoid CPU congestion.")
if len(throttling_blocks) > 1:
keys = set(map(lambda b: b.get_key(), throttling_blocks))
if len(keys) > 1 and 'blocks_throttle' in keys:
Messages.send_warning("This flow graph contains a throttle "
"block and another rate limiting block, "
"e.g. a hardware source or sink. "
"This is usually undesired. Consider "
"removing the throttle block.")
# generate
for filename, data in self._build_python_code_from_template():
with codecs.open(filename, 'w', encoding='utf-8') as fp:
fp.write(data)
if filename == self.get_file_path():
try:
os.chmod(filename, self._mode)
except:
pass
def get_popen(self):
"""
Execute this python flow graph.
Returns:
a popen object
"""
run_command = self._flow_graph.get_option('run_command')
try:
run_command = run_command.format(
python=shlex_quote(sys.executable),
filename=shlex_quote(self.get_file_path()))
run_command_args = shlex.split(run_command)
except Exception as e:
raise ValueError("Can't parse run command {!r}: {}".format(run_command, e))
# when in no gui mode on linux, use a graphical terminal (looks nice)
xterm_executable = find_executable(XTERM_EXECUTABLE)
if self._generate_options == 'no_gui' and xterm_executable:
run_command_args = [xterm_executable, '-e', run_command]
# this does not reproduce a shell executable command string, if a graphical
# terminal is used. Passing run_command through shlex_quote would do it but
# it looks really ugly and confusing in the console panel.
Messages.send_start_exec(' '.join(run_command_args))
return subprocess.Popen(
args=run_command_args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False, universal_newlines=True
)
def _build_python_code_from_template(self):
"""
Convert the flow graph to python code.
Returns:
a list of (file path, python code) tuples
"""
output = list()
fg = self._flow_graph
title = fg.get_option('title') or fg.get_option('id').replace('_', ' ').title()
imports = fg.get_imports()
variables = fg.get_variables()
parameters = fg.get_parameters()
monitors = fg.get_monitors()
# list of blocks not including variables and imports and parameters and disabled
def _get_block_sort_text(block):
code = block.get_make().replace(block.get_id(), ' ')
try:
code += block.get_param('notebook').get_value() # older gui markup w/ wxgui
except:
pass
try:
code += block.get_param('gui_hint').get_value() # newer gui markup w/ qtgui
except:
pass
return code
blocks = expr_utils.sort_objects(
filter(lambda b: b.get_enabled() and not b.get_bypassed(), fg.iter_blocks()),
lambda b: b.get_id(), _get_block_sort_text
)
# List of regular blocks (all blocks minus the special ones)
blocks = filter(lambda b: b not in (imports + parameters), blocks)
for block in blocks:
key = block.get_key()
file_path = os.path.join(self._dirname, block.get_id() + '.py')
if key == 'epy_block':
src = block.get_param('_source_code').get_value()
output.append((file_path, src))
elif key == 'epy_module':
src = block.get_param('source_code').get_value()
output.append((file_path, src))
# Filter out virtual sink connections
cf = lambda c: not (c.is_bus() or c.is_msg() or c.get_sink().get_parent().is_virtual_sink())
connections = filter(cf, fg.get_enabled_connections())
# Get the virtual blocks and resolve their connections
virtual = filter(lambda c: c.get_source().get_parent().is_virtual_source(), connections)
for connection in virtual:
source = connection.get_source().resolve_virtual_source()
sink = connection.get_sink()
resolved = fg.get_parent().Connection(flow_graph=fg, porta=source, portb=sink)
connections.append(resolved)
# Remove the virtual connection
connections.remove(connection)
# Bypassing blocks: Need to find all the enabled connections for the block using
# the *connections* object rather than get_connections(). Create new connections
# that bypass the selected block and remove the existing ones. This allows adjacent
# bypassed blocks to see the newly created connections to downstream blocks,
# allowing them to correctly construct bypass connections.
bypassed_blocks = fg.get_bypassed_blocks()
for block in bypassed_blocks:
# Get the upstream connection (off of the sink ports)
# Use *connections* not get_connections()
get_source_connection = lambda c: c.get_sink() == block.get_sinks()[0]
source_connection = filter(get_source_connection, connections)
# The source connection should never have more than one element.
assert (len(source_connection) == 1)
# Get the source of the connection.
source_port = source_connection[0].get_source()
# Loop through all the downstream connections
get_sink_connections = lambda c: c.get_source() == block.get_sources()[0]
for sink in filter(get_sink_connections, connections):
if not sink.get_enabled():
# Ignore disabled connections
continue
sink_port = sink.get_sink()
connection = fg.get_parent().Connection(flow_graph=fg, porta=source_port, portb=sink_port)
connections.append(connection)
# Remove this sink connection
connections.remove(sink)
# Remove the source connection
connections.remove(source_connection[0])
# List of connections where each endpoint is enabled (sorted by domains, block names)
connections.sort(key=lambda c: (
c.get_source().get_domain(), c.get_sink().get_domain(),
c.get_source().get_parent().get_id(), c.get_sink().get_parent().get_id()
))
connection_templates = fg.get_parent().get_connection_templates()
msgs = filter(lambda c: c.is_msg(), fg.get_enabled_connections())
# list of variable names
var_ids = [var.get_id() for var in parameters + variables]
# prepend self.
replace_dict = dict([(var_id, 'self.%s' % var_id) for var_id in var_ids])
# list of callbacks
callbacks = [
expr_utils.expr_replace(cb, replace_dict)
for cb in sum([block.get_callbacks() for block in fg.get_enabled_blocks()], [])
]
# map var id to callbacks
var_id2cbs = dict([
(var_id, filter(lambda c: expr_utils.get_variable_dependencies(c, [var_id]), callbacks))
for var_id in var_ids
])
# load the namespace
namespace = {
'title': title,
'imports': imports,
'flow_graph': fg,
'variables': variables,
'parameters': parameters,
'monitors': monitors,
'blocks': blocks,
'connections': connections,
'connection_templates': connection_templates,
'msgs': msgs,
'generate_options': self._generate_options,
'var_id2cbs': var_id2cbs,
}
# build the template
t = Template(open(FLOW_GRAPH_TEMPLATE, 'r').read(), namespace)
output.append((self.get_file_path(), str(t)))
return output
class HierBlockGenerator(TopBlockGenerator):
"""Extends the top block generator to also generate a block XML file"""
def __init__(self, flow_graph, file_path):
"""
Initialize the hier block generator object.
Args:
flow_graph: the flow graph object
file_path: where to write the py file (the xml goes into HIER_BLOCK_LIB_DIR)
"""
TopBlockGenerator.__init__(self, flow_graph, file_path)
self._mode = HIER_BLOCK_FILE_MODE
self._file_path = os.path.join(HIER_BLOCKS_LIB_DIR,
self._flow_graph.get_option('id') + '.py')
self._file_path_xml = self._file_path + '.xml'
def get_file_path_xml(self):
return self._file_path_xml
def write(self):
"""generate output and write it to files"""
TopBlockGenerator.write(self)
ParseXML.to_file(self._build_block_n_from_flow_graph_io(), self.get_file_path_xml())
ParseXML.validate_dtd(self.get_file_path_xml(), BLOCK_DTD)
try:
os.chmod(self.get_file_path_xml(), self._mode)
except:
pass
def _build_block_n_from_flow_graph_io(self):
"""
Generate the nested block XML data from the flow graph IO
Returns:
an xml node tree
"""
# extract info from the flow graph
block_key = self._flow_graph.get_option('id')
parameters = self._flow_graph.get_parameters()
def var_or_value(name):
if name in map(lambda p: p.get_id(), parameters):
return "$"+name
return name
# build the nested data
block_n = odict()
block_n['name'] = self._flow_graph.get_option('title') or \
self._flow_graph.get_option('id').replace('_', ' ').title()
block_n['key'] = block_key
block_n['category'] = self._flow_graph.get_option('category')
block_n['import'] = "from {0} import {0} # grc-generated hier_block".format(
self._flow_graph.get_option('id'))
# make data
if parameters:
block_n['make'] = '{cls}(\n {kwargs},\n)'.format(
cls=block_key,
kwargs=',\n '.join(
'{key}=${key}'.format(key=param.get_id()) for param in parameters
),
)
else:
block_n['make'] = '{cls}()'.format(cls=block_key)
# callback data
block_n['callback'] = [
'set_{key}(${key})'.format(key=param.get_id()) for param in parameters
]
# Parameters
block_n['param'] = list()
for param in parameters:
param_n = odict()
param_n['name'] = param.get_param('label').get_value() or param.get_id()
param_n['key'] = param.get_id()
param_n['value'] = param.get_param('value').get_value()
param_n['type'] = 'raw'
block_n['param'].append(param_n)
# bus stuff
if self._flow_graph.get_bussink():
block_n['bus_sink'] = '1'
if self._flow_graph.get_bussrc():
block_n['bus_source'] = '1'
# sink/source ports
for direction in ('sink', 'source'):
block_n[direction] = list()
for port in self._flow_graph.get_hier_block_io(direction):
port_n = odict()
port_n['name'] = port['label']
port_n['type'] = port['type']
if port['type'] != "message":
port_n['vlen'] = var_or_value(port['vlen'])
if port['optional']:
port_n['optional'] = '1'
block_n[direction].append(port_n)
# more bus stuff
bus_struct_sink = self._flow_graph.get_bus_structure_sink()
if bus_struct_sink:
block_n['bus_structure_sink'] = bus_struct_sink[0].get_param('struct').get_value()
bus_struct_src = self._flow_graph.get_bus_structure_src()
if bus_struct_src:
block_n['bus_structure_source'] = bus_struct_src[0].get_param('struct').get_value()
# documentation
block_n['doc'] = "\n".join(field for field in (
self._flow_graph.get_option('author'),
self._flow_graph.get_option('description'),
self.get_file_path()
) if field)
block_n['grc_source'] = str(self._flow_graph.grc_file_path)
n = {'block': block_n}
return n
class QtHierBlockGenerator(HierBlockGenerator):
def _build_block_n_from_flow_graph_io(self):
n = HierBlockGenerator._build_block_n_from_flow_graph_io(self)
block_n = n['block']
if not block_n['name'].upper().startswith('QT GUI'):
block_n['name'] = 'QT GUI ' + block_n['name']
block_n.insert_after('category', 'flags', BLOCK_FLAG_NEED_QT_GUI)
gui_hint_param = odict()
gui_hint_param['name'] = 'GUI Hint'
gui_hint_param['key'] = 'gui_hint'
gui_hint_param['value'] = ''
gui_hint_param['type'] = 'gui_hint'
gui_hint_param['hide'] = 'part'
block_n['param'].append(gui_hint_param)
block_n['make'] += (
"\n#set $win = 'self.%s' % $id"
"\n${gui_hint()($win)}"
)
return n
###########################################################
# back-port from python3
###########################################################
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
def shlex_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
|
gpl-3.0
|
avastms/shadowsocks
|
tests/coverage_server.py
|
1072
|
1655
|
#!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
import tornado.ioloop
import tornado.web
import urllib
class MainHandler(tornado.web.RequestHandler):
def get(self, project):
try:
with open('/tmp/%s-coverage' % project, 'rb') as f:
coverage = f.read().strip()
n = int(coverage.strip('%'))
if n >= 80:
color = 'brightgreen'
else:
color = 'yellow'
self.redirect(('https://img.shields.io/badge/'
'coverage-%s-%s.svg'
'?style=flat') %
(urllib.quote(coverage), color))
except IOError:
raise tornado.web.HTTPError(404)
application = tornado.web.Application([
(r"/([a-zA-Z0-9\-_]+)", MainHandler),
])
if __name__ == "__main__":
application.listen(8888, address='127.0.0.1')
tornado.ioloop.IOLoop.instance().start()
|
apache-2.0
|
luiscarlosgph/nas
|
env/lib/python2.7/site-packages/django/contrib/auth/management/commands/changepassword.py
|
58
|
2060
|
from __future__ import unicode_literals
import getpass
from optparse import make_option
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
)
help = "Change a user's password for django.contrib.auth."
requires_system_checks = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=force_str(prompt))
if not p:
raise CommandError("aborted")
return p
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
UserModel = get_user_model()
try:
u = UserModel._default_manager.using(options.get('database')).get(**{
UserModel.USERNAME_FIELD: username
})
except UserModel.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
self.stdout.write("Changing password for user '%s'\n" % u)
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
while p1 != p2 and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
self.stdout.write("Passwords do not match. Please try again.\n")
count = count + 1
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u
|
mit
|
ammelto/Cappuccino
|
Cappuccino/src/cappuccino.py
|
2
|
2288
|
import sys
import subprocess
arg = sys.argv
if len(arg) != 3:
sys.exit('Usage: cappuccino <input> <output>')
inf = open(arg[1], 'r')
tempOut = "Espresso" + arg[1]
outf = open(tempOut, 'w')
i = 0
o = 0
ilb = []
ob = []
sop = []
dontCare = []
deSop = []
deDontCare = []
for l in inf:
line = l.partition(' ')
if line[0] == '.i':
i = int(line[2])
outf.write(l)
elif line[0] == '.o':
o = int(line[2])
outf.write(l)
elif line[0] == '.ilb':
ilb = line
outf.write(l)
elif line[0] == '.ob':
ob = line
outf.write(l)
elif line[0].__contains__('.e'):
break
elif l.__contains__('sum'):
temp = l.split("+")
sop.append(temp[0])
if len(temp) > 1:
dontCare.append(temp[1])
else:
dontCare.append('NULL')
else:
outf.write(l)
if len(sop) > o:
sys.exit('Too many SOP equations listed and too few outputs')
for count in range(0, o):
deSop.append([])
deDontCare.append([])
for idx, x in enumerate(sop):
temp = str(x).replace(',', ' ')
temp = temp.replace('(', ' ')
temp = temp.replace(')', ' ')
temp = [int(s) for s in temp.split() if s.isdigit()]
deSop[idx].extend(temp)
for idx, x in enumerate(dontCare):
temp = str(x).replace(',', ' ')
temp = temp.replace('(', ' ')
temp = temp.replace(')', ' ')
temp = [int(s) for s in temp.split() if s.isdigit()]
deDontCare[idx].extend(temp)
print(sop)
print(dontCare)
print(deSop)
print(deDontCare)
binI = pow(2, i)
binO = pow(2, o)
for count in range(0, int(binI)):
s = str(bin(count))[2:]
if len(s) < i:
for x in range(0, i - len(s)):
s = '0' + s
outf.write(s)
outf.write(' ')
for x in range(0, o):
if count in deSop[x]:
outf.write('1')
elif count in deDontCare[x]:
outf.write('-')
else:
outf.write('0')
outf.write('\n')
outf.write('\n')
outf.write('.e')
outf.close()
program_name = "espresso"
arguments = ["-Dexact", "-o", "eqntott", tempOut]
command = [program_name]
command.extend(arguments)
f = open(arg[2], 'w')
subprocess.Popen(command, stdout=f).communicate()[0]
print(subprocess.Popen(command).communicate()[0])
|
mit
|
WojciechFocus/MMVD
|
tests/test_map_utilities.py
|
1
|
1310
|
# coding: utf-8
import pytest
from mmvdApp.utils import products, distances
from mmvdApp.shortest_path import a_star, neighbors
@pytest.mark.utils
@pytest.mark.map
def test_drop_zone(drop_zone1):
"""
Test if correct coordinates are returned for the drop zone in
warehouse_map1.
:param fixture drop_zone1: provided by `conftest.py`
"""
assert drop_zone1 == (4, 4)
@pytest.mark.utils
@pytest.mark.map
def test_products_coords(warehouse_map1, order1):
"""
Test:
* if correct coordinates are being returned
* if coordinates are returned in the same order as products are in `order1`
"""
coords = products(warehouse_map1, order1)
# order1 == ['f', 'b', 'a', 'c', 'd', 'e']
assert coords == [(3, 3), (1, 3), (1, 1), (2, 1), (2, 3), (3, 1)]
@pytest.mark.utils
@pytest.mark.map
def test_products_distances(warehouse_map1, order1, drop_zone1):
"""
Test if distances between various points are calculated correctly.
Test not only products, but also some points that aren't reachable.
"""
# test distances from drop zone to products and back
D = distances(warehouse_map1, products(warehouse_map1, order1),
start_pos=drop_zone1, end_pos=drop_zone1)
assert D == [(2, 2), (6, 4), (6, 8), (5, 9), (5, 3), (4, 10)]
|
mit
|
rayluo/boto
|
boto/fps/response.py
|
153
|
7866
|
# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map
def ResponseFactory(action):
class FPSResponse(Response):
_action = action
_Result = globals().get(action + 'Result', ResponseElement)
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != action + 'Response':
super(FPSResponse, self).endElement(name, value, connection)
return FPSResponse
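# Usage sketch (action name is illustrative): the factory builds a Response
# subclass bound to one FPS action, pairing it with the matching *Result
# element if this module defines one.
#
#     GetAccountBalanceResponse = ResponseFactory('GetAccountBalance')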
class ResponseElement(object):
def __init__(self, connection=None, name=None):
if connection is not None:
self._connection = connection
self._name = name or self.__class__.__name__
@property
def connection(self):
return self._connection
def __repr__(self):
render = lambda pair: '{!s}: {!r}'.format(*pair)
do_show = lambda pair: not pair[0].startswith('_')
attrs = filter(do_show, self.__dict__.items())
return '{0}({1})'.format(self.__class__.__name__,
', '.join(map(render, attrs)))
def startElement(self, name, attrs, connection):
return None
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != self._name:
setattr(self, name, value)
class Response(ResponseElement):
_action = 'Undefined'
def startElement(self, name, attrs, connection):
if name == 'ResponseMetadata':
setattr(self, name, ResponseElement(name=name))
elif name == self._action + 'Result':
setattr(self, name, self._Result(name=name))
else:
return super(Response, self).startElement(name, attrs, connection)
return getattr(self, name)
class ComplexAmount(ResponseElement):
def __repr__(self):
return '{0} {1}'.format(self.CurrencyCode, self.Value)
def __float__(self):
return float(self.Value)
def __str__(self):
return str(self.Value)
def startElement(self, name, attrs, connection):
if name not in ('CurrencyCode', 'Value'):
message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
raise AssertionError(message)
return super(ComplexAmount, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Value':
value = Decimal(value)
super(ComplexAmount, self).endElement(name, value, connection)
class AmountCollection(ResponseElement):
def startElement(self, name, attrs, connection):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
class AccountBalance(AmountCollection):
def startElement(self, name, attrs, connection):
if name == 'AvailableBalances':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(AccountBalance, self).startElement(name, attrs, connection)
class GetAccountBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'AccountBalance':
setattr(self, name, AccountBalance(name=name))
return getattr(self, name)
return super(GetAccountBalanceResult, self).startElement(name, attrs,
connection)
class GetTotalPrepaidLiabilityResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'OutstandingPrepaidLiability':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetTotalPrepaidLiabilityResult, self).startElement(name,
attrs, connection)
class GetPrepaidBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'PrepaidBalance':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetPrepaidBalanceResult, self).startElement(name, attrs,
connection)
class GetOutstandingDebtBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'OutstandingDebt':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetOutstandingDebtBalanceResult, self).startElement(name,
attrs, connection)
class TransactionPart(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'FeesPaid':
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return super(TransactionPart, self).startElement(name, attrs,
connection)
class Transaction(ResponseElement):
def __init__(self, *args, **kw):
self.TransactionPart = []
super(Transaction, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'TransactionPart':
getattr(self, name).append(TransactionPart(name=name))
return getattr(self, name)[-1]
if name in ('TransactionAmount', 'FPSFees', 'Balance'):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return super(Transaction, self).startElement(name, attrs, connection)
class GetAccountActivityResult(ResponseElement):
def __init__(self, *args, **kw):
self.Transaction = []
super(GetAccountActivityResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Transaction':
getattr(self, name).append(Transaction(name=name))
return getattr(self, name)[-1]
return super(GetAccountActivityResult, self).startElement(name, attrs,
connection)
class GetTransactionResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'Transaction':
setattr(self, name, Transaction(name=name))
return getattr(self, name)
return super(GetTransactionResult, self).startElement(name, attrs,
connection)
class GetTokensResult(ResponseElement):
def __init__(self, *args, **kw):
self.Token = []
super(GetTokensResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Token':
getattr(self, name).append(ResponseElement(name=name))
return getattr(self, name)[-1]
return super(GetTokensResult, self).startElement(name, attrs,
connection)
|
mit
|
2014cdbg9/2014cdbg9
|
wsgi/programs/cdbg1/__init__.py
|
5
|
16251
|
import cherrypy
# This is the definition of the CDBG1 class
class CDBG1(object):
# Each group uses index to direct the subsequent program flow
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
This is the cdbg1 group's program-development page under the 2014CDB collaborative project; the W12 task results are listed below.<br />
<!-- Relative links are used here rather than absolute URLs (this line is an html comment) -->
<a href="cube1">cdbg1 parametric cube drawing</a> (size variables a, b, c)<br /><br />
<a href="fourbar1">Four-bar linkage assembly</a><br /><br />
Make sure the links below are located in the V:/home/fourbar directory and that an empty Creo assembly file is open.<br />
<a href="/static/fourbar.7z">fourbar.7z</a> (right-click and save as a .7z file)<br />
'''
return outstring
'''
If the following setup is adopted
import programs.cdbg1 as cdbg1
root.cdbg1 = cdbg1.CDBG1()
then, after the program starts, the function can be invoked via /cdbg1/cube1
'''
@cherrypy.expose
def cube1(self, *args, **kwargs):
'''
// If you want to open a specific part file yourself
// If the third argument is false, the model is only loaded into the session but not displayed
// ret is the return value of the model open call
var ret = document.pwl.pwlMdlOpen("axle_5.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
// Assign the Pro/E session to the variable session
var session = pfcGetProESession();
// Open the part file in a window and display it
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("axle_5.prt"));
var solid = session.GetModel("axle_5.prt",pfcCreate("pfcModelType").MDL_PART);
'''
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcParameterExamples.js"></script>
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcComponentFeatExamples.js"></script>
</head>
<body>
<script type="text/javascript">
var session = pfcGetProESession ();
// Use the currently opened file as the solid model
// for volume
var solid = session.CurrentModel;
var a, b, c, i, j, aValue, bValue, cValue, volume, count;
// Bind the model parameter a to the JavaScript variable a
a = solid.GetParam("a");
b = solid.GetParam("b");
c = solid.GetParam("c");
volume=0;
count=0;
try
{
for(i=0;i<5;i++)
{
myf = 100;
myn = myf + i*10;
// Set the parameter value, using CreateDoubleParamValue from ModelItem to convert it to the floating-point value required by Pro/Web.Link
aValue = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
bValue = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// Assign the prepared values to the corresponding part parameters
a.Value = aValue;
b.Value = bValue;
// After the part dimensions are reset, call Regenerate to update the model
solid.Regenerate(void null);
// Use GetMassProperty to obtain the model's mass-related object
properties = solid.GetMassProperty(void null);
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// Save the part as a new file
//var newfile = document.pwl.pwlMdlSaveAs("filename.prt", "v:/tmp", "filename_5_"+count+".prt");
// Test STL export
//var stl_csys = "PRT_CSYS_DEF";
//var stl_instrs = new pfcCreate ("pfcSTLASCIIExportInstructions").Create(stl_csys);
//stl_instrs.SetQuality(10);
//solid.Export("v:/tmp/filename_5_"+count+".stl", stl_instrs);
// End of STL export test
//if (!newfile.Status) {
//alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
//}
} // for loop
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def fourbar1(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
</head>
<body>
<script type="text/javascript">
if (!pfcIsWindows())
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
var session = pfcGetProESession();
// Set the config option
session.SetConfigOption("comp_placement_assumptions","no");
// Build the placement matrix for positioning the components
var identityMatrix = pfcCreate ("pfcMatrix3D");
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set (x, y, 1.0);
else
identityMatrix.Set (x, y, 0.0);
}
var transf = pfcCreate ("pfcTransform3D").Create (identityMatrix);
// Get the current working directory
var currentDir = session.getCurrentDirectory();
// Use the currently opened file as the model
var model = session.CurrentModel;
// Check that a model exists and that it is an assembly
if (model == void null || model.Type != pfcCreate ("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
var assembly = model;
/**----------------------------------------------- link0 -------------------------------------------------------------**/
// File directory; placing the CAD files under the working directory is recommended for convenience
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link0.prt");
// If link1.prt is already in the session, use it directly
var componentModel = session.GetModelFromDescr (descr);
// If link1.prt is not in the session, load it into the session from the working directory
var componentModel = session.RetrieveModel(descr);
// If link1.prt is already in the session, place it into the assembly
if (componentModel != void null)
{
// Note that this asmcomp is the object on which the constraints are set
// asmcomp is a feature object; the component is placed directly into the assembly using the transf coordinate transform
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
// Create the constraints variable
var constrs = pfcCreate ("pfcComponentConstraints");
// Set the three datum planes of the assembly; note the default names differ from ASM_D_FRONT in Pro/E WF and are ASM_FRONT instead
var asmDatums = new Array ("ASM_FRONT", "ASM_TOP", "ASM_RIGHT");
// Set the three datum planes of the part; the names are the same as in Pro/E WF
var compDatums = new Array ("FRONT", "TOP", "RIGHT");
// Create the ids variable; intseq is a sequence-of-integers data type whose elements are accessed by integer index, starting at 0
var ids = pfcCreate ("intseq");
// Create the path variable
var path = pfcCreate ("MpfcAssembly").CreateComponentPath (assembly, ids);
// Variables used for interactive selection
var MpfcSelect = pfcCreate ("MpfcSelect");
// Loop over the three datum planes and constrain the assembly planes to the part planes
for (var i = 0; i < 3; i++)
{
// Assembly reference plane
var asmItem = assembly.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, asmDatums [i]);
// If the assembly reference plane is missing, set the interactive selection flag
if (asmItem == void null)
{
interactFlag = true;
continue;
}
// Part reference plane
var compItem = componentModel.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, compDatums [i]);
// If the part reference plane is missing, set the interactive selection flag
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, path);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (false, false);
// Append the constraint built from the selections to the constraint collection
constrs.Append (constr);
}
// Apply the assembly constraints
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link1 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link1.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
var featID = components.Item(0).Id;
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_1", "TOP", "ASM_TOP");
var compDatums = new Array ("A_1", "TOP", "TOP");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link2 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link2.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate ("intseq");
ids.Append(featID+1);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP", "ASM_TOP");
var compDatums = new Array ("A_1", "TOP", "TOP");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link3 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link3.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
var ids = pfcCreate ("intseq");
ids.Append(featID+2);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2");
var compDatums = new Array ("A_1");
for (var i = 0; i < 1; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
var ids = pfcCreate ("intseq");
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP");
var compDatums = new Array ("A_2", "BOTTON");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, true);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
var session = pfcGetProESession ();
var solid = session.CurrentModel;
properties = solid.GetMassProperty(void null);
var COG = properties.GravityCenter;
document.write("MassProperty:<br />");
document.write("Mass:"+(properties.Mass.toFixed(2))+" pound<br />");
document.write("Average Density:"+(properties.Density.toFixed(2))+" pound/inch^3<br />");
document.write("Surface area:"+(properties.SurfaceArea.toFixed(2))+" inch^2<br />");
document.write("Volume:"+(properties.Volume.toFixed(2))+" inch^3<br />");
document.write("COG_X:"+COG.Item(0).toFixed(2)+"<br />");
document.write("COG_Y:"+COG.Item(1).toFixed(2)+"<br />");
document.write("COG_Z:"+COG.Item(2).toFixed(2)+"<br />");
try
{
document.write("Current Directory:<br />"+currentDir);
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
return outstring
|
gpl-2.0
|
tjyang/psp
|
9.1addon/sbutils-1.3.1/src/sbutils-1.3.1/lib/classutil.py
|
4
|
4026
|
# Copyright (C) 2009 The Written Word, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# $Id$
class AbstractProperty (object):
def __init__ (self, attrname):
self._attrname = attrname
def __get__ (self, property, type):
msg = "Abstract property `%s.%s' accessed" % (type.__name__,
self._attrname)
raise AttributeError (msg)
class AbstractMethod (object):
"""Defines a class to create abstract methods
Derived from http://code.activestate.com/recipes/266468/"""
def __init__ (self, funcname):
self._funcname = funcname
def __get__ (self, obj, type):
"""Get callable object
@returns An instance of AbstractMethodHelper.
"""
return self.AbstractMethodHelper (self._funcname, type)
class AbstractMethodHelper (object):
"""Abstract method helper class
An AbstractMethodHelper instance is a callable object that
represents an abstract method.
"""
def __init__ (self, funcname, cls):
self._funcname = funcname
self._class = cls
def __call__ (self, *args, **kwds):
"""Call abstract method
Raises a TypeError, because abstract methods can not be called.
"""
msg = "Abstract method `%s.%s' called" % (self._class.__name__,
self._funcname)
raise TypeError (msg)
class AbstractMeta (type):
def __init__ (cls, name, bases, *args, **kwds):
"""Configure a new class
@param cls: Class object
@param name: Name of the class
@param bases: All base classes for cls
"""
super (AbstractMeta, cls).__init__ (cls, name, bases, *args, **kwds)
# Detach cls.new () from class AbstractMeta, and make it a method
# of cls.
cls.__new__ = staticmethod (cls.new)
# Find all abstract attributes, and assign them to either
# cls.__abstractmethods__ or cls.__abstractprops__, so we
# can report them when an instantiation is attempted.
abstractmethods = []
abstractprops = []
ancestors = list (cls.__mro__)
ancestors.reverse () # Start with __builtin__.object
for ancestor in ancestors:
for clsname, clst in ancestor.__dict__.items ():
if isinstance (clst, AbstractMethod):
abstractmethods.append (clsname)
elif isinstance (clst, AbstractProperty):
abstractprops.append (clsname)
else:
if clsname in abstractmethods:
abstractmethods.remove (clsname)
elif clsname in abstractprops:
abstractprops.remove (clsname)
abstractmethods.sort ()
setattr (cls, '__abstractmethods__', abstractmethods)
abstractprops.sort ()
setattr (cls, '__abstractprops__', abstractprops)
def new (self, cls, *args, **kwds):
"""Allocator for class cls
@param self: Class object for which an instance should be created.
@param cls: Same as self.
"""
if len (cls.__abstractmethods__) or len (cls.__abstractprops__):
msg = "Can't instantiate class `%s';" % cls.__name__
if len (cls.__abstractmethods__):
msg += "\nUnimplemented abstract methods: " + \
", ".join (cls.__abstractmethods__)
if len (cls.__abstractprops__):
msg += "\nUnimplemented abstract properties: " + \
", ".join (cls.__abstractprops__)
raise NotImplementedError (msg)
return object.__new__ (self)
def abstract (f):
"""Decorator function for abstract methods"""
return AbstractMethod (f.__name__)
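# A minimal usage sketch (class names are hypothetical) of the helpers above;
# instantiating Base raises NotImplementedError until both the abstract method
# and the abstract property are overridden:
#
#   class Base (object):
#       __metaclass__ = AbstractMeta
#       name = AbstractProperty ('name')
#       @abstract
#       def run (self):
#           pass
#
#   class Impl (Base):
#       name = 'impl'
#       def run (self):
#           return self.name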
|
gpl-2.0
|
xiaowenhuman/airflow
|
airflow/operators/mysql_to_hive.py
|
31
|
4139
|
from builtins import chr
from collections import OrderedDict
import unicodecsv as csv
import logging
from tempfile import NamedTemporaryFile
import MySQLdb
from airflow.hooks import HiveCliHook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class MySqlToHiveTransfer(BaseOperator):
"""
Moves data from MySql to Hive. The operator runs your query against
MySQL, stores the file locally before loading it into a Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the MySQL database
:type sql: str
:param hive_table: target Hive table, use dot notation to target a
specific database
:type hive_table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
:param mysql_conn_id: source mysql connection
:type mysql_conn_id: str
:param hive_conn_id: destination hive connection
:type hive_conn_id: str
"""
template_fields = ('sql', 'partition', 'hive_table')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
hive_table,
create=True,
recreate=False,
partition=None,
delimiter=chr(1),
mysql_conn_id='mysql_default',
hive_cli_conn_id='hive_cli_default',
*args, **kwargs):
super(MySqlToHiveTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = str(delimiter)
self.mysql_conn_id = mysql_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
@classmethod
def type_map(cls, mysql_type):
t = MySQLdb.constants.FIELD_TYPE
d = {
t.BIT: 'INT',
t.DECIMAL: 'DOUBLE',
t.DOUBLE: 'DOUBLE',
t.FLOAT: 'DOUBLE',
t.INT24: 'INT',
t.LONG: 'INT',
t.LONGLONG: 'BIGINT',
t.SHORT: 'INT',
t.YEAR: 'INT',
}
return d[mysql_type] if mysql_type in d else 'STRING'
def execute(self, context):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
logging.info("Dumping MySQL query results to local file")
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
with NamedTemporaryFile("w") as f:
csv_writer = csv.writer(f, delimiter=self.delimiter)
field_dict = OrderedDict()
for field in cursor.description:
field_dict[field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor)
f.flush()
cursor.close()
conn.close()
logging.info("Loading file into Hive")
hive.load_file(
f.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate)
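# A minimal wiring sketch (DAG id, dates, query, and table names are hypothetical;
# the import path is an assumption based on the operator layout of this code base):
#
#   from datetime import datetime
#   from airflow import DAG
#   from airflow.operators import MySqlToHiveTransfer
#
#   dag = DAG('mysql_to_hive_example', start_date=datetime(2015, 1, 1))
#   load_users = MySqlToHiveTransfer(
#       task_id='load_users',
#       sql='SELECT * FROM users',
#       hive_table='staging.users',
#       partition={'ds': '{{ ds }}'},
#       recreate=True,
#       dag=dag)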
|
apache-2.0
|
KohlsTechnology/ansible
|
lib/ansible/utils/module_docs_fragments/vultr.py
|
38
|
1400
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
api_key:
description:
- API key of the Vultr API.
- The ENV variable C(VULTR_API_KEY) is used as default, when defined.
api_timeout:
description:
- HTTP timeout to Vultr API.
- The ENV variable C(VULTR_API_TIMEOUT) is used as default, when defined.
default: 60
api_retries:
description:
- Number of retries in case the Vultr API returns an HTTP 503 code.
- The ENV variable C(VULTR_API_RETRIES) is used as default, when defined.
default: 5
api_account:
description:
- Name of the ini section in the C(vultr.ini) file.
- The ENV variable C(VULTR_API_ACCOUNT) is used as default, when defined.
default: default
api_endpoint:
description:
- URL to the API endpoint (without trailing slash).
- The ENV variable C(VULTR_API_ENDPOINT) is used as default, when defined.
default: "https://api.vultr.com"
validate_certs:
description:
- Validate SSL certs of the Vultr API.
default: yes
type: bool
requirements:
- "python >= 2.6"
notes:
- Also see the API documentation on https://www.vultr.com/api/.
'''
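# A sketch (module name and description are hypothetical) of how a Vultr module
# pulls this fragment into its own documentation:
#
#   DOCUMENTATION = '''
#   module: vultr_server_facts
#   short_description: Gather facts about Vultr servers
#   extends_documentation_fragment: vultr
#   '''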
|
gpl-3.0
|
quentinsf/ansible
|
lib/ansible/plugins/lookup/sequence.py
|
37
|
7347
|
# (c) 2013, Jayson Vantuyl <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from re import compile as re_compile, IGNORECASE
from ansible.errors import *
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.template import Templar
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
"^(" + # Group 0
NUM + # Group 1: Start
"-)?" +
NUM + # Group 2: End
"(/" + # Group 3
NUM + # Group 4: Stride
")?" +
"(:(.+))?$", # Group 5, Group 6: Format String
IGNORECASE
)
class LookupModule(LookupBase):
"""
sequence lookup module
Used to generate some sequence of items. Takes arguments in two forms.
The simple / shortcut form is:
[start-]end[/stride][:format]
As indicated by the brackets: start, stride, and format string are all
optional. The format string is in the style of printf. This can be used
to pad with zeros, format in hexadecimal, etc. All of the numerical values
can be specified in octal (e.g. 0664) or hexadecimal (e.g. 0x3f8).
Negative numbers are not supported.
Some examples:
5 -> ["1","2","3","4","5"]
5-8 -> ["5", "6", "7", "8"]
2-10/2 -> ["2", "4", "6", "8", "10"]
4:host%02d -> ["host01","host02","host03","host04"]
The standard Ansible key-value form is accepted as well. For example:
start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
This format takes an alternate form of "end" called "count", which counts
some number from the starting value. For example:
count=5 -> ["1", "2", "3", "4", "5"]
start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
The count option is mostly useful for avoiding off-by-one errors and errors
calculating the number of entries in a sequence when a stride is specified.
"""
def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d"
def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in ["start", "end", "count", "stride"]:
try:
arg_raw = args.pop(arg, None)
if arg_raw is None:
continue
arg_cooked = int(arg_raw, 0)
setattr(self, arg, arg_cooked)
except ValueError:
raise AnsibleError(
"can't parse arg %s=%r as integer"
% (arg, arg_raw)
)
if 'format' in args:
self.format = args.pop("format")
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %r"
% args.keys()
)
def parse_simple_args(self, term):
"""parse the shortcut forms, return True/False"""
match = SHORTCUT.match(term)
if not match:
return False
_, start, end, _, stride, _, format = match.groups()
if start is not None:
try:
start = int(start, 0)
except ValueError:
raise AnsibleError("can't parse start=%s as integer" % start)
if end is not None:
try:
end = int(end, 0)
except ValueError:
raise AnsibleError("can't parse end=%s as integer" % end)
if stride is not None:
try:
stride = int(stride, 0)
except ValueError:
raise AnsibleError("can't parse stride=%s as integer" % stride)
if start is not None:
self.start = start
if end is not None:
self.end = end
if stride is not None:
self.stride = stride
if format is not None:
self.format = format
def sanity_check(self):
if self.count is None and self.end is None:
raise AnsibleError(
"must specify count or end in with_sequence"
)
elif self.count is not None and self.end is not None:
raise AnsibleError(
"can't specify both count and end in with_sequence"
)
elif self.count is not None:
# convert count to end
if self.count != 0:
self.end = self.start + self.count * self.stride - 1
else:
self.start = 0
self.end = 0
self.stride = 0
del self.count
if self.stride > 0 and self.end < self.start:
raise AnsibleError("to count backwards make stride negative")
if self.stride < 0 and self.end > self.start:
raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
if self.stride > 0:
adjust = 1
else:
adjust = -1
numbers = xrange(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
formatted = self.format % i
yield formatted
except (ValueError, TypeError):
raise AnsibleError(
"problem formatting %r with %r" % (i, self.format)
)
def run(self, terms, variables, **kwargs):
results = []
if isinstance(terms, basestring):
terms = [ terms ]
templar = Templar(loader=self._loader, variables=variables)
for term in terms:
try:
self.reset() # clear out things for this iteration
term = templar.template(term)
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
except Exception, e:
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.sanity_check()
if self.stride != 0:
results.extend(self.generate_sequence())
except AnsibleError:
raise
except Exception as e:
raise AnsibleError(
"unknown error generating sequence: %s" % e
)
return results
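# A playbook-side sketch of the shortcut and key-value forms documented above
# (paths, group, and task names are illustrative):
#
#   - name: create numbered data directories
#     file: path=/srv/data/dir{{ item }} state=directory
#     with_sequence: start=1 end=4 format=%02d
#
#   - name: add hosts host01..host05 to the web group
#     add_host: name={{ item }} groups=web
#     with_sequence: count=5 format=host%02d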
|
gpl-3.0
|
ilc-opensource/io-js
|
utils/autogen/ply/lex.py
|
482
|
40739
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
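# A minimal illustration of the interface described above (the token names and
# rules are only an example, not part of this module):
#
#   tokens = ('NUMBER', 'PLUS')
#   t_PLUS = r'\+'
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#   t_ignore = ' \t'
#   def t_error(t):
#       t.lexer.skip(1)
#
#   lexer = lex()            # lex.lex() when imported as ply.lex
#   lexer.input("3 + 4")
#   while True:
#       tok = lexer.token()
#       if not tok: break
#       print(tok.type, tok.value, tok.lineno, tok.lexpos)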
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token, if nothing, we just move to next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
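# A short sketch of the pattern the comment above describes (the identifier regex
# is illustrative; it is built at runtime and attached to the rule via @TOKEN):
#
#   digit = r'([0-9])'
#   nondigit = r'([_A-Za-z])'
#   identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
#   @TOKEN(identifier)
#   def t_ID(t):
#       return t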
|
bsd-3-clause
|
ubc/edx-platform
|
scripts/hotfix.py
|
154
|
1898
|
#!/usr/bin/env python
"""
Script to generate alton and git commands for executing hotfixes
Commands for:
- cutting amis
- creating hotfix tag
The script should be run with the hotfix's git hash as a command-line argument.
i.e. `python scripts/hotfix.py <hotfix hash>`
"""
from __future__ import print_function
from datetime import date
import sys
import argparse
import textwrap
def generate_alton_commands(hotfix_hash):
"""
Generates commands for alton to cut amis from the git hash of the hotfix.
"""
template = textwrap.dedent("""
@alton cut ami for stage-edx-edxapp from prod-edx-edxapp with edx_platform_version={hotfix_hash}
@alton cut ami for prod-edge-edxapp from prod-edge-edxapp with edx_platform_version={hotfix_hash}
@alton cut ami for prod-edx-edxapp from prod-edx-edxapp with edx_platform_version={hotfix_hash}
""")
return template.strip().format(hotfix_hash=hotfix_hash)
def generate_git_command(hotfix_hash):
"""
Generates command to tag the git hash of the hotfix.
"""
git_string = 'git tag -a hotfix-{iso_date} -m "Hotfix for {msg_date}" {hotfix_hash}'.format(
iso_date=date.today().isoformat(),
msg_date=date.today().strftime("%b %d, %Y"),
hotfix_hash=hotfix_hash,
)
return git_string
def main():
parser = argparse.ArgumentParser(description="Generate alton and git commands for hotfixes")
parser.add_argument("hash", help="git hash for hotfix")
args = parser.parse_args()
hotfix_hash = args.hash
print("\nHere are the alton commands to cut the hotfix amis:")
print(generate_alton_commands(hotfix_hash))
print("\nHere is the git command to generate the hotfix tag:")
print(generate_git_command(hotfix_hash))
print("\nOnce you create the git tag, push the tag by running:")
print("git push --tags\n")
if __name__ == '__main__':
main()
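# Example run (the hash is illustrative):
#   $ python scripts/hotfix.py 0123abcd
# prints three "@alton cut ami ..." lines followed by a
# `git tag -a hotfix-YYYY-MM-DD -m "Hotfix for ..." 0123abcd` command.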
|
agpl-3.0
|
samedder/azure-cli
|
src/azure-cli-core/azure/cli/core/telemetry.py
|
3
|
13612
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import datetime
import json
import locale
import os
import platform
import re
import sys
import traceback
import uuid
from functools import wraps
import azure.cli.core.decorators as decorators
import azure.cli.core.telemetry_upload as telemetry_core
PRODUCT_NAME = 'azurecli'
TELEMETRY_VERSION = '0.0.1.4'
AZURE_CLI_PREFIX = 'Context.Default.AzureCLI.'
decorators.is_diagnostics_mode = telemetry_core.in_diagnostic_mode
def _user_agrees_to_telemetry(func):
@wraps(func)
def _wrapper(*args, **kwargs):
if not _get_azure_cli_config().getboolean('core', 'collect_telemetry', fallback=True):
return
return func(*args, **kwargs)
return _wrapper
class TelemetrySession(object): # pylint: disable=too-many-instance-attributes
start_time = None
end_time = None
application = None
arg_complete_env_name = None
correlation_id = str(uuid.uuid4())
command = 'execute-unknown-command'
output_type = 'none'
parameters = []
result = 'None'
result_summary = None
payload_properties = None
exceptions = []
module_correlation = None
def add_exception(self, exception, fault_type, description=None, message=''):
details = {
'Reserved.DataModel.EntityType': 'Fault',
'Reserved.DataModel.Fault.Description': description or fault_type,
'Reserved.DataModel.Correlation.1': '{},UserTask,'.format(self.correlation_id),
'Reserved.DataModel.Fault.TypeString': exception.__class__.__name__,
'Reserved.DataModel.Fault.Exception.Message': _remove_cmd_chars(
message or str(exception)),
'Reserved.DataModel.Fault.Exception.StackTrace': _remove_cmd_chars(_get_stack_trace())
}
fault_type = _remove_symbols(fault_type).replace('"', '').replace("'", '').replace(' ', '-')
fault_name = '{}/commands/{}'.format(PRODUCT_NAME, fault_type.lower())
self.exceptions.append((fault_name, details))
@decorators.suppress_all_exceptions(raise_in_diagnostics=True, fallback_return=None)
def generate_payload(self):
events = []
base = self._get_base_properties()
cli = self._get_azure_cli_properties()
user_task = self._get_user_task_properties()
user_task.update(base)
user_task.update(cli)
events.append({'name': self.event_name, 'properties': user_task})
for name, props in self.exceptions:
props.update(base)
props.update(cli)
props.update({'Reserved.DataModel.CorrelationId': str(uuid.uuid4()),
'Reserved.EventId': str(uuid.uuid4())})
events.append({'name': name, 'properties': props})
payload = json.dumps(events)
return _remove_symbols(payload)
def _get_base_properties(self):
return {
'Reserved.ChannelUsed': 'AI',
'Reserved.EventId': str(uuid.uuid4()),
'Reserved.SequenceNumber': 1,
'Reserved.SessionId': str(uuid.uuid4()),
'Reserved.TimeSinceSessionStart': 0,
'Reserved.DataModel.Source': 'DataModelAPI',
'Reserved.DataModel.EntitySchemaVersion': 4,
'Reserved.DataModel.Severity': 0,
'Reserved.DataModel.ProductName': PRODUCT_NAME,
'Reserved.DataModel.FeatureName': self.feature_name,
'Reserved.DataModel.EntityName': self.command_name,
'Reserved.DataModel.CorrelationId': self.correlation_id,
'Context.Default.VS.Core.ExeName': PRODUCT_NAME,
'Context.Default.VS.Core.ExeVersion': '{}@{}'.format(
self.product_version, self.module_version),
'Context.Default.VS.Core.MacAddressHash': _get_hash_mac_address(),
'Context.Default.VS.Core.Machine.Id': _get_hash_machine_id(),
'Context.Default.VS.Core.OS.Type': platform.system().lower(), # eg. darwin, windows
'Context.Default.VS.Core.OS.Version': platform.version().lower(), # eg. 10.0.14942
'Context.Default.VS.Core.User.Id': _get_installation_id(),
'Context.Default.VS.Core.User.IsMicrosoftInternal': 'False',
'Context.Default.VS.Core.User.IsOptedIn': 'True',
'Context.Default.VS.Core.TelemetryApi.ProductVersion': '{}@{}'.format(
PRODUCT_NAME, _get_core_version())
}
def _get_user_task_properties(self):
result = {
'Reserved.DataModel.EntityType': 'UserTask',
'Reserved.DataModel.Action.Type': 'Atomic',
'Reserved.DataModel.Action.Result': self.result
}
if self.result_summary:
result['Reserved.DataModel.Action.ResultSummary'] = self.result_summary
return result
def _get_azure_cli_properties(self):
source = 'az' if self.arg_complete_env_name not in os.environ else 'completer'
result = {}
self.set_custom_properties(result, 'Source', source)
self.set_custom_properties(result,
'ClientRequestId',
lambda: self.application.session['headers'][
'x-ms-client-request-id'])
self.set_custom_properties(result, 'CoreVersion', _get_core_version)
self.set_custom_properties(result, 'InstallationId', _get_installation_id)
self.set_custom_properties(result, 'ShellType', _get_shell_type)
self.set_custom_properties(result, 'UserAzureId', _get_user_azure_id)
self.set_custom_properties(result, 'UserAzureSubscriptionId', _get_azure_subscription_id)
self.set_custom_properties(result, 'DefaultOutputType',
lambda: _get_azure_cli_config().get('core', 'output',
fallback='unknown'))
self.set_custom_properties(result, 'EnvironmentVariables', _get_env_string)
self.set_custom_properties(result, 'Locale',
lambda: '{},{}'.format(locale.getdefaultlocale()[0],
locale.getdefaultlocale()[1]))
self.set_custom_properties(result, 'StartTime', str(self.start_time))
self.set_custom_properties(result, 'EndTime', str(self.end_time))
self.set_custom_properties(result, 'OutputType', self.output_type)
self.set_custom_properties(result, 'Parameters', ','.join(self.parameters or []))
self.set_custom_properties(result, 'PythonVersion', platform.python_version())
self.set_custom_properties(result, 'ModuleCorrelation', self.module_correlation)
return result
@property
def command_name(self):
return self.command.lower().replace('-', '').replace(' ', '-')
@property
def event_name(self):
return '{}/{}/{}'.format(PRODUCT_NAME, self.feature_name, self.command_name)
@property
def feature_name(self):
        # The feature name is used to create the event name. The feature name should
        # eventually be the module name. However, it takes time to resolve the actual
        # module name using the pip module. Therefore, a hard-coded value is used until
        # a better solution is implemented.
return 'commands'
@property
def module_version(self):
        # TODO: find an efficient solution to retrieve the module version
return 'none'
@property
def product_version(self):
return _get_core_version()
@classmethod
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_custom_properties(cls, prop, name, value):
actual_value = value() if hasattr(value, '__call__') else value
if actual_value:
prop[AZURE_CLI_PREFIX + name] = actual_value
_session = TelemetrySession()
# public api
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def start():
_session.start_time = datetime.datetime.now()
@_user_agrees_to_telemetry
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def conclude():
_session.end_time = datetime.datetime.now()
payload = _session.generate_payload()
if payload:
import subprocess
subprocess.Popen([sys.executable, os.path.realpath(telemetry_core.__file__), payload])
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_exception(exception, fault_type, summary=None):
    if summary:
        _session.result_summary = _remove_cmd_chars(summary)
_session.add_exception(exception, fault_type=fault_type, description=summary)
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_failure(summary=None):
if _session.result != 'None':
return
_session.result = 'Failure'
if summary:
_session.result_summary = _remove_cmd_chars(summary)
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_success(summary=None):
if _session.result != 'None':
return
_session.result = 'Success'
if summary:
_session.result_summary = _remove_cmd_chars(summary)
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_user_fault(summary=None):
if _session.result != 'None':
return
_session.result = 'UserFault'
if summary:
_session.result_summary = _remove_cmd_chars(summary)
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_application(application, arg_complete_env_name):
_session.application, _session.arg_complete_env_name = application, arg_complete_env_name
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_command_details(command, output_type=None, parameters=None):
_session.command = command
_session.output_type = output_type
_session.parameters = parameters
@decorators.suppress_all_exceptions(raise_in_diagnostics=True)
def set_module_correlation_data(correlation_data):
_session.module_correlation = correlation_data[:512]
# definitions
@decorators.call_once
@decorators.suppress_all_exceptions(fallback_return={})
def _get_azure_cli_config():
from azure.cli.core._config import az_config
return az_config
# internal utility functions
@decorators.suppress_all_exceptions(fallback_return=None)
def _get_core_version():
from azure.cli.core import __version__ as core_version
return core_version
@decorators.suppress_all_exceptions(fallback_return=None)
def _get_installation_id():
return _get_profile().get_installation_id()
@decorators.call_once
@decorators.suppress_all_exceptions(fallback_return=None)
def _get_profile():
from azure.cli.core._profile import Profile
return Profile()
@decorators.suppress_all_exceptions(fallback_return='')
@decorators.hash256_result
def _get_hash_mac_address():
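    # Format uuid.getnode() as a MAC-style 'XX-XX-XX-...' string; the hash256_result
    # decorator then hashes the returned value before it is reported.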
s = ''
for index, c in enumerate(hex(uuid.getnode())[2:].upper()):
s += c
if index % 2:
s += '-'
s = s.strip('-')
return s
@decorators.suppress_all_exceptions(fallback_return='')
def _get_hash_machine_id():
    # Definition: take the first 128 bits of the SHA256-hashed MAC address and convert them into a GUID
return str(uuid.UUID(_get_hash_mac_address()[0:32]))
@decorators.suppress_all_exceptions(fallback_return='')
@decorators.hash256_result
def _get_user_azure_id():
return _get_profile().get_current_account_user()
def _get_env_string():
return _remove_cmd_chars(_remove_symbols(str([v for v in os.environ
if v.startswith('AZURE_CLI')])))
@decorators.suppress_all_exceptions(fallback_return=None)
def _get_azure_subscription_id():
return _get_profile().get_subscription_id()
def _get_shell_type():
if 'ZSH_VERSION' in os.environ:
return 'zsh'
elif 'BASH_VERSION' in os.environ:
return 'bash'
elif 'KSH_VERSION' in os.environ or 'FCEDIT' in os.environ:
return 'ksh'
elif 'WINDIR' in os.environ:
return 'cmd'
return _remove_cmd_chars(_remove_symbols(os.environ.get('SHELL')))
@decorators.suppress_all_exceptions(fallback_return='')
@decorators.hash256_result
def _get_error_hash():
return str(sys.exc_info()[1])
@decorators.suppress_all_exceptions(fallback_return='')
def _get_stack_trace():
def _get_root_path():
dir_path = os.path.dirname(os.path.realpath(__file__))
head, tail = os.path.split(dir_path)
while tail and tail != 'azure-cli':
head, tail = os.path.split(head)
return head
def _remove_root_paths(s):
site_package_regex = re.compile('.*\\\\site-packages\\\\')
root = _get_root_path()
frames = [p.replace(root, '') for p in s]
return str([site_package_regex.sub('site-packages\\\\', f) for f in frames])
_, _, ex_traceback = sys.exc_info()
trace = traceback.format_tb(ex_traceback)
return _remove_cmd_chars(_remove_symbols(_remove_root_paths(trace)))
def _remove_cmd_chars(s):
if isinstance(s, str):
return s.replace("'", '_').replace('"', '_').replace('\r\n', ' ').replace('\n', ' ')
return s
def _remove_symbols(s):
if isinstance(s, str):
for c in '$%^&|':
s = s.replace(c, '_')
return s
|
mit
|
hurricup/intellij-community
|
python/lib/Lib/site-packages/django/conf/locale/ml/formats.py
|
341
|
1635
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
apache-2.0
|
ShineFan/odoo
|
addons/product/wizard/product_price.py
|
380
|
2254
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_price_list(osv.osv_memory):
_name = 'product.price_list'
_description = 'Price List'
_columns = {
'price_list': fields.many2one('product.pricelist', 'PriceList', required=True),
'qty1': fields.integer('Quantity-1'),
'qty2': fields.integer('Quantity-2'),
'qty3': fields.integer('Quantity-3'),
'qty4': fields.integer('Quantity-4'),
'qty5': fields.integer('Quantity-5'),
}
_defaults = {
'qty1': 1,
'qty2': 5,
'qty3': 10,
'qty4': 0,
'qty5': 0,
}
def print_report(self, cr, uid, ids, context=None):
"""
        Get the wizard data and print the report
@return : return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['price_list','qty1', 'qty2','qty3','qty4','qty5'], context=context)
res = res and res[0] or {}
res['price_list'] = res['price_list'][0]
datas['form'] = res
return self.pool['report'].get_action(cr, uid, [], 'product.report_pricelist', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kawamon/hue
|
desktop/core/ext-py/Django-1.11.29/django/core/management/commands/diffsettings.py
|
58
|
1956
|
from django.core.management.base import BaseCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"""Converts a module namespace to a Python dictionary."""
return {k: repr(v) for k, v in module.__dict__.items() if not omittable(k)}
class Command(BaseCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
'--all', action='store_true', dest='all', default=False,
help='Display all settings, regardless of their value. Default values are prefixed by "###".',
)
parser.add_argument(
'--default', dest='default', metavar='MODULE', default=None,
help=(
"The settings module to compare the current settings against. Leave empty to "
"compare against Django's default settings."
),
)
def handle(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, Settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default = options['default']
default_settings = module_to_dict(Settings(default) if default else global_settings)
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
elif options['all']:
output.append("### %s = %s" % (key, user_settings[key]))
return '\n'.join(output)
|
apache-2.0
|
bjackman/lisa
|
libs/utils/perf_analysis.py
|
3
|
6952
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pylab as pl
import re
import sys
import trappy
import logging
# Regexp to match an rt-app generated logfile
TASK_NAME_RE = re.compile('.*\/rt-app-(.+)-[0-9]+.log')
class PerfAnalysis(object):
def __init__(self, datadir, tasks=None):
# Dataframe of all tasks performance data
self.perf_data = {}
# Folder containing all rt-app data
self.datadir = None
# Setup logging
self._log = logging.getLogger('PerfAnalysis')
# Load performance data generated by rt-app workloads
self.__loadRTAData(datadir, tasks)
# Keep track of the datadir from where data have been loaded
if len(self.perf_data) == 0:
raise ValueError('No performance data found on folder [{0:s}]'\
.format(datadir))
self.datadir = datadir
def __taskNameFromLog(self, logfile):
tname_match = re.search(TASK_NAME_RE, logfile)
if tname_match is None:
raise ValueError('The logfile [{0:s}] is not from rt-app'\
.format(logfile))
return tname_match.group(1)
def __logfileFromTaskName(self, taskname):
for logfile in glob.glob(
'{0:s}/rt-app-{1:s}.log'.format(self.datadir, taskname)):
return logfile
raise ValueError('No rt-app logfile found for task [{0:s}]'\
.format(taskname))
def tasks(self):
"""
Return the list of tasks for which performance data have been loaded
"""
if self.datadir is None:
raise ValueError("rt-app performance data not (yet) loaded")
return self.perf_data.keys()
def logfile(self, task):
"""
Return the logfile for the specified task
"""
if task not in self.perf_data:
raise ValueError('No logfile loaded for task [{0:s}]'\
.format(task))
return self.perf_data[task]['logfile']
def df(self, task):
"""
Return the PANDAS dataframe with the performance data for the
specified task
"""
if self.datadir is None:
raise ValueError("rt-app performance data not (yet) loaded")
if task not in self.perf_data:
raise ValueError('No dataframe loaded for task [{0:s}]'\
.format(task))
return self.perf_data[task]['df']
def __loadRTAData(self, datadir, tasks):
"""
        Load performance data of an rt-app workload
"""
if tasks is None:
# Lookup for all rt-app logfile into the specified datadir
for logfile in glob.glob('{0:s}/rt-app-*.log'.format(datadir)):
task_name = self.__taskNameFromLog(logfile)
self.perf_data[task_name] = {}
self.perf_data[task_name]['logfile'] = logfile
self._log.debug('Found rt-app logfile for task [%s]', task_name)
else:
# Lookup for specified rt-app task logfile into specified datadir
for task in tasks:
logfile = self.__logfileFromTaskName(task)
                self.perf_data[task] = {}
                self.perf_data[task]['logfile'] = logfile
                self._log.debug('Found rt-app logfile for task [%s]', task)
# Load all the found logfile into a dataset
for task in self.perf_data.keys():
self._log.debug('Loading dataframe for task [%s]...', task)
df = pd.read_table(self.logfile(task),
sep='\s+',
skiprows=1,
header=0,
usecols=[1,2,3,4,7,8,9,10],
names=[
'Cycles', 'Run' ,'Period', 'Timestamp',
'Slack', 'CRun', 'CPeriod', 'WKPLatency'
])
# Normalize time to [s] with origin on the first event
start_time = df['Timestamp'][0]/1e6
df['Time'] = df['Timestamp']/1e6 - start_time
df.set_index(['Time'], inplace=True)
# Add performance metrics column, performance is defined as:
# slack
# perf = -------------
# period - run
df['PerfIndex'] = df['Slack'] / (df['CPeriod'] - df['CRun'])
# Keep track of the loaded dataframe
self.perf_data[task]['df'] = df
def plotPerf(self, task, title=None):
"""
Plot the Latency/Slack and Performance data for the specified task
"""
# Grid
gs = gridspec.GridSpec(2, 2, height_ratios=[4,1], width_ratios=[3,1]);
gs.update(wspace=0.1, hspace=0.1);
# Figure
plt.figure(figsize=(16, 2*6));
if title:
plt.suptitle(title, y=.97, fontsize=16,
horizontalalignment='center');
# Plot: Slack and Latency
axes = plt.subplot(gs[0,0]);
axes.set_title('Task [{0:s}] (start) Latency and (completion) Slack'\
.format(task));
data = self.df(task)[['Slack', 'WKPLatency']]
data.plot(ax=axes, drawstyle='steps-post', style=['b', 'g']);
# axes.set_xlim(x_min, x_max);
axes.xaxis.set_visible(False);
# Plot: Performance
axes = plt.subplot(gs[1,0]);
axes.set_title('Task [{0:s}] Performance Index'.format(task));
data = self.df(task)[['PerfIndex',]]
data.plot(ax=axes, drawstyle='steps-post');
axes.set_ylim(0, 2);
# axes.set_xlim(x_min, x_max);
# Plot: Slack Histogram
axes = plt.subplot(gs[0:2,1]);
data = self.df(task)[['PerfIndex',]]
data.hist(bins=30, ax=axes, alpha=0.4);
# axes.set_xlim(x_min, x_max);
pindex_avg = data.mean()[0];
pindex_std = data.std()[0];
self._log.info('PerfIndex, Task [%s] avg: %.2f, std: %.2f',
task, pindex_avg, pindex_std)
axes.axvline(pindex_avg, color='b', linestyle='--', linewidth=2);
# Save generated plots into datadir
figname = '{}/task_perf_{}.png'.format(self.datadir, task)
pl.savefig(figname, bbox_inches='tight')
|
apache-2.0
|
benspaulding/django
|
django/contrib/gis/db/backends/postgis/operations.py
|
13
|
25790
|
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
"For PostGIS operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
"For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."
def __init__(self, prefix, function, **kwargs):
super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
"For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
"For PostGIS distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, prefix, operator):
super(PostGISDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
"For PostGIS spherical distance operations (using the spheroid)."
dist_func = 'distance_spheroid'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'
def __init__(self, prefix, operator):
# An extra parameter in `end_subst` is needed for the spheroid string.
super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSphereDistance(PostGISDistance):
"For PostGIS spherical distance operations."
dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
"For PostGIS Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, prefix, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in
('Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union')])
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
try:
if hasattr(settings, 'POSTGIS_VERSION'):
vtup = settings.POSTGIS_VERSION
if len(vtup) == 3:
# The user-supplied PostGIS version.
version = vtup
else:
# This was the old documented way, but it's stupid to
# include the string.
version = vtup[1:4]
else:
vtup = self.postgis_version_tuple()
version = vtup[1:]
# Getting the prefix -- even though we don't officially support
# PostGIS 1.2 anymore, keeping it anyway in case a prefix change
# for something else is necessary.
if version >= (1, 2, 2):
prefix = 'ST_'
else:
prefix = ''
self.geom_func_prefix = prefix
self.spatial_version = version
except DatabaseError:
raise ImproperlyConfigured('Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
# TODO: Raise helpful exceptions as they become known.
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left' : PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right' : PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left' : PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right' : PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below' : PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above' : PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below' : PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above' : PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as' : PostGISOperator('~='),
'exact' : PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained' : PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
# by B's bounding box.
'bbcontains' : PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps' : PostGISOperator('&&'),
}
self.geometry_functions = {
'equals' : PostGISFunction(prefix, 'Equals'),
'disjoint' : PostGISFunction(prefix, 'Disjoint'),
'touches' : PostGISFunction(prefix, 'Touches'),
'crosses' : PostGISFunction(prefix, 'Crosses'),
'within' : PostGISFunction(prefix, 'Within'),
'overlaps' : PostGISFunction(prefix, 'Overlaps'),
'contains' : PostGISFunction(prefix, 'Contains'),
'intersects' : PostGISFunction(prefix, 'Intersects'),
'relate' : (PostGISRelate, basestring),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float, int, long)
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian' : PostGISDistance(prefix, operator),
'sphere' : PostGISSphereDistance(prefix, operator),
'spheroid' : PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
}
# Versions 1.2.2+ have KML serialization support.
if version < (1, 2, 2):
ASKML = False
else:
ASKML = 'ST_AsKML'
self.geometry_functions.update(
{'coveredby' : PostGISFunction(prefix, 'CoveredBy'),
'covers' : PostGISFunction(prefix, 'Covers'),
})
self.distance_functions['dwithin'] = (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# The union aggregate and topology operation use the same signature
# in versions 1.3+.
if version < (1, 3, 0):
UNIONAGG = 'GeomUnion'
UNION = 'Union'
MAKELINE = False
else:
UNIONAGG = 'ST_Union'
UNION = 'ST_Union'
MAKELINE = 'ST_MakeLine'
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
        # ST_ContainsProperly, ST_MakeLine, and ST_GeoHash added in 1.4.
if version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby' : self.geometry_functions['coveredby'],
'covers' : self.geometry_functions['covers'],
'intersects' : self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps' : PostGISOperator('&&'),
}
# Creating a dictionary lookup of all GIS terms for PostGIS.
gis_terms = ['isnull']
gis_terms += self.geometry_operators.keys()
gis_terms += self.geometry_functions.keys()
self.gis_terms = dict([(term, None) for term in gis_terms])
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.extent3d = prefix + 'Extent3D'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = ASKML
self.length = prefix + 'Length'
self.length3d = prefix + 'Length3D'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = MAKELINE
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
        self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.perimeter3d = prefix + 'Perimeter3D'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = UNION
self.unionagg = UNIONAGG
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_extent(self, box):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
        the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
        Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if not self.geography:
raise NotImplementedError('PostGIS 1.5 required for geography column support.')
if f.srid != 4326:
raise NotImplementedError('PostGIS 1.5 supports geography columns '
'only with an SRID of 4326.')
return 'geography(%s,%d)'% (f.geom_type, f.srid)
else:
return None
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
        the geography column type introduced in PostGIS 1.5.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography and self.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'expression'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
placeholder = placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
cursor = self.connection._cursor()
try:
try:
cursor.execute('SELECT %s()' % func)
row = cursor.fetchone()
except:
# Responsibility of callers to perform error handling.
raise
finally:
# Close out the connection. See #9437.
self.connection.close()
return row[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def num_params(self, lookup_type, num_param):
"""
Helper routine that returns a boolean indicating whether the number of
parameters is correct for the lookup type.
"""
def exactly_two(np): return np == 2
        def two_to_three(np): return np >= 2 and np <= 3
if (lookup_type in self.distance_functions and
lookup_type != 'dwithin'):
return two_to_three(num_param)
else:
return exactly_two(num_param)
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
        Constructs spatial SQL from the given lookup value tuple (alias, col,
        db_type), the lookup type string, the lookup value, and the geometry
        field.
"""
alias, col, db_type = lvalue
# Getting the quoted geometry column.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_operators:
if field.geography and not lookup_type in self.geography_operators:
raise ValueError('PostGIS geography does not support the '
'"%s" lookup.' % lookup_type)
# Handling a PostGIS operator.
op = self.geometry_operators[lookup_type]
return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type in self.geometry_functions:
if field.geography and not lookup_type in self.geography_functions:
raise ValueError('PostGIS geography type does not support the '
'"%s" lookup.' % lookup_type)
# See if a PostGIS geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the PostGISOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
if not self.num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(self.geom_func_prefix, value[1])
elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
if not field.geography and field.geodetic(self.connection):
# Geodetic distances are only available from Points to
# PointFields on PostGIS 1.4 and below.
if not self.connection.ops.geography:
if field.geom_type != 'POINT':
raise ValueError('PostGIS spherical operations are only valid on PointFields.')
if str(geom.geom_type) != 'Point':
raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid':
op = op['spheroid']
else:
op = op['sphere']
else:
op = op['cartesian']
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union': agg_name += 'agg'
sql_template = '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.postgis.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
return SpatialRefSys
|
bsd-3-clause
|
joakim-hove/django
|
tests/model_options/models/tablespaces.py
|
342
|
1853
|
from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'migrate'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
name = models.CharField(max_length=50)
class ArticleRef(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True)
authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
name = models.CharField(max_length=50)
class Meta:
db_table = 'model_options_scientistref'
db_tablespace = 'tbl_tbsp'
managed = False
class Article(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')
class Meta:
db_table = 'model_options_articleref'
db_tablespace = 'tbl_tbsp'
managed = False
# Also set the tables for automatically created models
Authors = Article._meta.get_field('authors').remote_field.through
Authors._meta.db_table = 'model_options_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').remote_field.through
Reviewers._meta.db_table = 'model_options_articleref_reviewers'
|
bsd-3-clause
|
Yubico/yubikey-manager
|
tests/device/conftest.py
|
1
|
2960
|
from ykman.device import connect_to_device, list_all_devices, read_info
from ykman.pcsc import list_devices
from yubikit.core import TRANSPORT
from yubikit.core.otp import OtpConnection
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SmartCardConnection
from yubikit.management import USB_INTERFACE
from functools import partial
from . import condition
import pytest
import time
import os
@pytest.fixture(scope="session")
def _device(pytestconfig):
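    # Resolve the single YubiKey under test, either through a PC/SC reader
    # (--reader) or by USB enumeration, and verify it matches the requested serial.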
serial = pytestconfig.getoption("device")
no_serial = pytestconfig.getoption("no_serial")
if not serial:
if no_serial:
serial = None
else:
pytest.skip("No serial specified for device tests")
reader = pytestconfig.getoption("reader")
if reader:
readers = list_devices(reader)
if len(readers) != 1:
pytest.exit("No/Multiple readers matched")
dev = readers[0]
with dev.open_connection(SmartCardConnection) as conn:
info = read_info(None, conn)
else:
devices = list_all_devices()
if len(devices) != 1:
pytest.exit("Device tests require a single YubiKey")
dev, info = devices[0]
if info.serial != serial:
pytest.exit("Device serial does not match: %d != %r" % (serial, info.serial))
return dev, info
@pytest.fixture(scope="session")
def device(_device):
return _device[0]
@pytest.fixture(scope="session")
def info(_device):
return _device[1]
@pytest.fixture(scope="session")
def version(info):
return info.version
@pytest.fixture(scope="session")
def transport(device):
return device.transport
@pytest.fixture(scope="session")
def pid(device):
return device.pid
@pytest.fixture(scope="session")
def await_reboot(transport):
delay = float(os.environ.get("REBOOT_TIME", "2.0"))
return partial(time.sleep, delay) if transport == TRANSPORT.USB else lambda: None
connection_scope = os.environ.get("CONNECTION_SCOPE", "function")
@pytest.fixture(scope=connection_scope)
@condition.transport(TRANSPORT.USB)
def otp_connection(device, info):
if USB_INTERFACE.OTP in device.pid.get_interfaces():
with connect_to_device(info.serial, [OtpConnection])[0] as c:
yield c
@pytest.fixture(scope=connection_scope)
@condition.transport(TRANSPORT.USB)
def fido_connection(device, info):
if USB_INTERFACE.FIDO in device.pid.get_interfaces():
with connect_to_device(info.serial, [FidoConnection])[0] as c:
yield c
@pytest.fixture(scope=connection_scope)
def ccid_connection(device, info):
if device.transport == TRANSPORT.NFC:
with device.open_connection(SmartCardConnection) as c:
yield c
elif USB_INTERFACE.CCID in device.pid.get_interfaces():
with connect_to_device(info.serial, [SmartCardConnection])[0] as c:
yield c
else:
pytest.skip("CCID connection not available")
|
bsd-2-clause
|
nyee/RMG-Java
|
databases/RMG_database/kinetics_libraries/Dooley/C1/remove_unused_species.py
|
11
|
1403
|
#!/usr/bin/env python
# encoding: utf-8
"""
remove_unused_species.py
Created by Richard West on 2011-03-10.
Copyright (c) 2011 MIT. All rights reserved.
"""
import sys
import os
import re
import fileinput
species = set()
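# Collect every species name that appears on either side of a reaction in
# reactions.txt and pdepreactions.txt.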
for line in fileinput.input(('reactions.txt','pdepreactions.txt')):
if (line.find(' = ') == -1):
continue
if (line.strip().startswith('//')):
continue
line = line.replace("(+M)","")
reactants, products = line.split(' = ')
products, junk = products.split(None, 1)
combined = "%s+%s"%(reactants,products)
for s in combined.split('+'):
species.add(s.strip())
print "These %d species listed in reactions.txt and pdepreactions.txt" % len(species)
for s in species:
print s
print "Copying the species.txt file, removing redundant species"
outfile = file('species.new.txt','w')
infile = file('species.txt')
for line in infile:
if (line.strip().startswith('//')):
continue
if (line.strip()==''):
continue
s = line.strip()
try:
if (s in species):
while (line.strip()!=''):
outfile.write(line.strip()+'\n')
line = infile.next()
outfile.write('\n')
else:
print "Skipping %s"%s
while (line.strip()!=''):
line = infile.next()
except StopIteration:
break
outfile.close()
|
mit
|
jsilhan/rpg
|
rel-eng/travis/upload.py
|
5
|
2772
|
from copr.client import CoprClient
from operator import itemgetter
import sys
import time
import urllib2
import os
def build_project(project_name, copr_client):
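    # Create the Copr project (with a default set of chroots) if it does not
    # already exist, then submit a new build of the package given on the command line.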
found_project = False
result = copr_client.get_projects_list(sys.argv[2]).projects_list
for project in result:
if project.projectname == project_name:
found_project = True
break
if not found_project:
chroot = ["fedora-23-x86_64", "fedora-23-i386", "fedora-22-x86_64",
"fedora-22-i386", "fedora-rawhide-i386",
"fedora-rawhide-x86_64"]
copr_client.create_project(project_name, chroots=chroot)
result = copr_client.create_new_build(project_name, pkgs=[sys.argv[4]])
return result
def main():
cl = CoprClient(username=sys.argv[2], login=sys.argv[1], token=sys.argv[3],
copr_url="http://copr.fedoraproject.org")
results = build_project(sys.argv[5], cl)
for bw in results.builds_list:
build_id = bw.build_id
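        # Poll the build until every chroot reaches a final state
        # (skipped, failed or succeeded).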
while True:
count_chroot = len(bw.handle.get_build_details()
.data["chroots"].items())
for ch, status in bw.handle.get_build_details()\
.data["chroots"].items():
if status in ["skipped", "failed", "succeeded"]:
count_chroot -= 1
time.sleep(10)
if count_chroot == 0:
break
sort_result = sorted(bw.handle.get_build_details().data["chroots"]
.items(), key=itemgetter(0))
i = 1
exit_code = 0
for ch, status in sort_result:
print("echo -en \"travis_fold:start:rpg-{}\\\\r\"".format(ch))
if (status == "failed"):
print("echo \"{} $(tput setaf 1)failed $(tput sgr0)\""
.format(ch))
exit_code += 1
else:
print("echo \"{} $(tput setaf 2){} $(tput sgr0)\""
.format(ch, status))
str_build_id = '0' * (8 - len(str(build_id))) + str(build_id)
url = "https://copr-be.cloud.fedoraproject.org/results/" +\
sys.argv[2] + "/" + sys.argv[5] + "/" + ch + "/" +\
str_build_id + "-rpg/build.log.gz"
logfile = urllib2.urlopen(url)
directory = os.environ["travis_home"]
output = open(directory + "/build" + str(i) + ".log.gz", 'wb')
output.write(logfile.read())
output.close()
print("zcat " + "build" + str(i) + ".log.gz")
i += 1
print("echo -en \"travis_fold:end:rpg-{}\\\\r\"".format(ch))
print("exit {}".format(exit_code))
if __name__ == '__main__':
main()
|
gpl-2.0
|
Virako/authapi
|
authapi/captcha/tests.py
|
1
|
8039
|
import json
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from api import test_data
from api.models import ACL, AuthEvent
from api.tests import JClient
from authmethods.models import Code
from captcha.models import Captcha
# Create your tests here.
class TestProcessCaptcha(TestCase):
fixtures = ['initial.json']
def setUp(self):
ae = AuthEvent(auth_method="email",
auth_method_config=test_data.authmethod_config_email_default,
extra_fields=test_data.ae_email_fields_captcha['extra_fields'],
status='started',
census="open")
ae.save()
self.ae = ae
self.aeid = ae.pk
u_admin = User(username=test_data.admin['username'])
u_admin.set_password(test_data.admin['password'])
u_admin.save()
u_admin.userdata.event = ae
u_admin.userdata.save()
acl = ACL(user=u_admin.userdata, object_type='AuthEvent', perm='edit',
object_id=self.aeid)
acl.save()
acl = ACL(user=u_admin.userdata, object_type='AuthEvent', perm='create',
object_id=0)
acl.save()
def tearDown(self):
# Removed generated captchas
captcha_dir = settings.STATIC_ROOT + '/captcha/'
captchas = [f for f in os.listdir(captcha_dir) if f.endswith('.png') ]
for c in captchas:
os.remove(captcha_dir + c)
def test_create_new_captcha(self):
c = JClient()
self.assertEqual(0, Captcha.objects.count())
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
self.assertEqual(1, Captcha.objects.count())
@override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_ALWAYS_EAGER=True,
BROKER_BACKEND='memory')
def test_pregenerate_captchas(self):
self.assertEqual(0, Captcha.objects.count())
c = JClient()
c.authenticate(0, test_data.admin)
response = c.post('/api/auth-event/', test_data.ae_email_fields_captcha)
self.assertEqual(response.status_code, 200)
self.assertEqual(settings.PREGENERATION_CAPTCHA, Captcha.objects.filter(used=False).count())
def test_create_authevent_email_with_captcha(self):
c = JClient()
# add census without problem with captcha
c.authenticate(0, test_data.admin)
response = c.census(self.aeid, test_data.census_email_default)
self.assertEqual(response.status_code, 200)
response = c.get('/api/auth-event/%d/census/' % self.aeid, {})
self.assertEqual(response.status_code, 200)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(r['userids']), 4)
# add register: without captcha
response = c.register(self.aeid, test_data.register_email_fields)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
# create captcha
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
captcha = Captcha.objects.all()[0]
data = test_data.register_email_fields
# add register: bad code
data.update({'captcha_code': '', 'captcha': captcha.challenge})
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
# add register # TODO fix
data.update({'captcha_code': captcha.code, 'captcha': captcha.challenge})
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 200)
# add register: repeat captcha invalid
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
# create captcha
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
captcha = Captcha.objects.all()[0]
data = test_data.register_email_fields
# add register: bad challenge
data.update({'captcha_code': captcha.code, 'captcha': ''})
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
def test_create_authevent_sms_with_captcha(self):
self.ae.auth_method = 'sms'
self.ae.auth_method_config = test_data.authmethod_config_sms_default
self.ae.save()
c = JClient()
# add census without problem with captcha
c.authenticate(0, test_data.admin)
response = c.census(self.aeid, test_data.census_sms_default)
self.assertEqual(response.status_code, 200)
response = c.get('/api/auth-event/%d/census/' % self.aeid, {})
self.assertEqual(response.status_code, 200)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(len(r['userids']), 4)
# add register: without captcha
response = c.register(self.aeid, test_data.register_email_fields)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertTrue(r['msg'].count('Invalid captcha'))
# create captcha
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
captcha = Captcha.objects.all()[0]
data = test_data.register_sms_default
data.update({'tlf': '999999999'})
# add register: bad code
data.update({'captcha_code': '', 'captcha': captcha.challenge})
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
# add register # TODO fix
data.update({'captcha_code': captcha.code, 'captcha': captcha.challenge})
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 200)
# add register: repeat captcha invalid
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
# create captcha
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
captcha = Captcha.objects.all()[0]
data = test_data.register_sms_fields
data.update({'tlf': '888888888'})
# add register: bad challenge
data.update({'captcha_code': captcha.code, 'captcha': ''})
response = c.register(self.aeid, data)
self.assertEqual(response.status_code, 400)
r = json.loads(response.content.decode('utf-8'))
self.assertEqual(r['msg'], 'Invalid captcha')
def test_get_new_captcha_generate_other_captcha(self):
self.assertEqual(Captcha.objects.count(), 0)
self.assertEqual(Captcha.objects.filter(used=True).count(), 0)
c = JClient()
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
r = json.loads(response.content.decode('utf-8'))
self.assertTrue(r['image_url'] and r['captcha_code'])
response = c.get('/api/captcha/new/', {})
self.assertEqual(response.status_code, 200)
r = json.loads(response.content.decode('utf-8'))
self.assertTrue(r['image_url'] and r['captcha_code'])
self.assertEqual(Captcha.objects.count(), 2)
self.assertEqual(Captcha.objects.filter(used=True).count(), 2)
|
agpl-3.0
|
kakekfunky/ci-AdminLTE-HMVC
|
assets/ionicons/builder/generate.py
|
357
|
9438
|
from subprocess import call
import os
import json
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')
FONTS_FOLDER_PATH = os.path.join(ROOT_PATH, 'fonts')
CSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'css')
SCSS_FOLDER_PATH = os.path.join(ROOT_PATH, 'scss')
LESS_FOLDER_PATH = os.path.join(ROOT_PATH, 'less')
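# Overview (added note): this script regenerates the Ionicons font files,
# SCSS/LESS/CSS, cheatsheet and package manifests from build_data.json. All
# paths are resolved relative to this file, and the external tools it shells
# out to -- fontforge (see generate_font_files) and the sass CLI (see
# generate_css_from_scss) -- are assumed to be available on PATH.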
def main():
generate_font_files()
data = get_build_data()
rename_svg_glyph_names(data)
generate_scss(data)
generate_less(data)
generate_cheatsheet(data)
generate_component_json(data)
generate_composer_json(data)
generate_bower_json(data)
def generate_font_files():
print "Generate Fonts"
cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
call(cmd, shell=True)
def rename_svg_glyph_names(data):
# hacky and slow (but safe) way to rename glyph-name attributes
svg_path = os.path.join(FONTS_FOLDER_PATH, 'ionicons.svg')
svg_file = open(svg_path, 'r+')
svg_text = svg_file.read()
svg_file.seek(0)
for ionicon in data['icons']:
# uniF2CA
org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
ion_name = 'ion-%s' % (ionicon['name'])
svg_text = svg_text.replace(org_name, ion_name)
svg_file.write(svg_text)
svg_file.close()
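# Note (added): the generated SVG font names its glyphs in the 'uniXXXX' form;
# rewriting them to 'ion-<name>' keeps the glyph names aligned with the icon
# names used in the generated CSS classes.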
def generate_less(data):
print "Generate LESS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-variables.less')
icons_file_path = os.path.join(LESS_FOLDER_PATH, '_ionicons-icons.less')
d = []
d.append('/*!');
d.append('Ionicons, v%s' % (font_version) );
d.append('Created by Ben Sperry for the Ionic Framework, http://ionicons.com/');
d.append('https://twitter.com/benjsperry https://twitter.com/ionicframework');
d.append('MIT License: https://github.com/driftyco/ionicons');
d.append('*/');
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('@ionicons-font-path: "../fonts";')
d.append('@ionicons-font-family: "%s";' % (font_name) )
d.append('@ionicons-version: "%s";' % (font_version) )
d.append('@ionicons-prefix: %s;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('@ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.@{ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' &:extend(.ion);')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.@{ionicons-prefix}%s:before { content: @ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
def generate_scss(data):
print "Generate SCSS"
font_name = data['name']
font_version = data['version']
css_prefix = data['prefix']
variables_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-variables.scss')
icons_file_path = os.path.join(SCSS_FOLDER_PATH, '_ionicons-icons.scss')
d = []
d.append('// Ionicons Variables')
d.append('// --------------------------\n')
d.append('$ionicons-font-path: "../fonts" !default;')
d.append('$ionicons-font-family: "%s" !default;' % (font_name) )
d.append('$ionicons-version: "%s" !default;' % (font_version) )
d.append('$ionicons-prefix: %s !default;' % (css_prefix) )
d.append('')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('$ionicon-var-%s: "%s";' % (ionicon['name'], chr_code) )
f = open(variables_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
d = []
d.append('// Ionicons Icons')
d.append('// --------------------------\n')
group = [ '.%s' % (data['name'].lower()) ]
for ionicon in data['icons']:
group.append('.#{$ionicons-prefix}%s:before' % (ionicon['name']) )
d.append( ',\n'.join(group) )
d.append('{')
d.append(' @extend .ion;')
d.append('}')
for ionicon in data['icons']:
chr_code = ionicon['code'].replace('0x', '\\')
d.append('.#{$ionicons-prefix}%s:before { content: $ionicon-var-%s; }' % (ionicon['name'], ionicon['name']) )
f = open(icons_file_path, 'w')
f.write( '\n'.join(d) )
f.close()
generate_css_from_scss(data)
def generate_css_from_scss(data):
print "Generate CSS From SCSS"
scss_file_path = os.path.join(SCSS_FOLDER_PATH, 'ionicons.scss')
css_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.css')
css_min_file_path = os.path.join(CSS_FOLDER_PATH, 'ionicons.min.css')
cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
call(cmd, shell=True)
print "Generate Minified CSS From SCSS"
cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
call(cmd, shell=True)
def generate_cheatsheet(data):
print "Generate Cheatsheet"
cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')
f = open(template_path, 'r')
template_html = f.read()
f.close()
f = open(icon_row_path, 'r')
icon_row_template = f.read()
f.close()
content = []
for ionicon in data['icons']:
css_code = ionicon['code'].replace('0x', '\\')
escaped_html_code = ionicon['code'].replace('0x', '&amp;#x') + ';'
html_code = ionicon['code'].replace('0x', '&#x') + ';'
item_row = icon_row_template
item_row = item_row.replace('{{name}}', ionicon['name'])
item_row = item_row.replace('{{prefix}}', data['prefix'])
item_row = item_row.replace('{{css_code}}', css_code)
item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
item_row = item_row.replace('{{html_code}}', html_code)
content.append(item_row)
template_html = template_html.replace("{{font_name}}", data["name"])
template_html = template_html.replace("{{font_version}}", data["version"])
template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])) )
template_html = template_html.replace("{{content}}", '\n'.join(content) )
f = open(cheatsheet_file_path, 'w')
f.write(template_html)
f.close()
def generate_component_json(data):
print "Generate component.json"
d = {
"name": data['name'],
"repo": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"version": data['version'],
"keywords": [],
"dependencies": {},
"development": {},
"license": "MIT",
"styles": [
"css/%s.css" % (data['name'].lower())
],
"fonts": [
"fonts/%s.eot" % (data['name'].lower()),
"fonts/%s.svg" % (data['name'].lower()),
"fonts/%s.ttf" % (data['name'].lower()),
"fonts/%s.woff" % (data['name'].lower())
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
component_file_path = os.path.join(ROOT_PATH, 'component.json')
f = open(component_file_path, 'w')
f.write(txt)
f.close()
def generate_composer_json(data):
print "Generate composer.json"
d = {
"name": "driftyco/ionicons",
"description": "The premium icon font for Ionic Framework.",
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"homepage": "http://ionicons.com/",
"authors": [
{
"name": "Ben Sperry",
"email": "[email protected]",
"role": "Designer",
"homepage": "https://twitter.com/benjsperry"
},
{
"name": "Adam Bradley",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/adamdbradley"
},
{
"name": "Max Lynch",
"email": "[email protected]",
"role": "Developer",
"homepage": "https://twitter.com/maxlynch"
}
],
"extra": {},
"license": [ "MIT" ]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
composer_file_path = os.path.join(ROOT_PATH, 'composer.json')
f = open(composer_file_path, 'w')
f.write(txt)
f.close()
def generate_bower_json(data):
print "Generate bower.json"
d = {
"name": data['name'],
"version": data['version'],
"homepage": "https://github.com/driftyco/ionicons",
"authors": [
"Ben Sperry <[email protected]>",
"Adam Bradley <[email protected]>",
"Max Lynch <[email protected]>"
],
"description": "Ionicons - free and beautiful icons from the creators of Ionic Framework",
"main": [
"css/%s.css" % (data['name'].lower()),
"fonts/*"
],
"keywords": [ "fonts", "icon font", "icons", "ionic", "web font"],
"license": "MIT",
"ignore": [
"**/.*",
"builder",
"node_modules",
"bower_components",
"test",
"tests"
]
}
txt = json.dumps(d, indent=4, separators=(',', ': '))
bower_file_path = os.path.join(ROOT_PATH, 'bower.json')
f = open(bower_file_path, 'w')
f.write(txt)
f.close()
def get_build_data():
build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
f = open(build_data_path, 'r')
data = json.loads(f.read())
f.close()
return data
if __name__ == "__main__":
main()
|
mit
|
urandu/roboto
|
scripts/common_tests.py
|
3
|
11855
|
# coding=UTF-8
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common tests for different targets."""
import glob
import unittest
from fontTools import ttLib
from nototools import coverage
from nototools import font_data
import freetype
import layout
import roboto_data
def get_rendered_char_height(font_filename, font_size, char, target='mono'):
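# Renders `char` at `font_size` with FreeType using the requested hinting
# target ('mono' or 'lcd') and returns the height, in bitmap rows, of the
# rendered glyph.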
if target == 'mono':
render_params = freetype.FT_LOAD_TARGET_MONO
elif target == 'lcd':
render_params = freetype.FT_LOAD_TARGET_LCD
render_params |= freetype.FT_LOAD_RENDER
face = freetype.Face(font_filename)
face.set_char_size(font_size*64)
face.load_char(char, render_params)
return face.glyph.bitmap.rows
def load_fonts(patterns, expected_count=None):
"""Load all fonts specified in the patterns.
Also assert that the number of the fonts found is exactly the same as
expected_count."""
all_font_files = []
for pattern in patterns:
all_font_files += glob.glob(pattern)
all_fonts = [ttLib.TTFont(font) for font in all_font_files]
if expected_count:
assert len(all_font_files) == expected_count
return all_font_files, all_fonts
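# Usage sketch (illustrative only -- the glob pattern and count are
# hypothetical): a concrete test module is expected to assign the result to
# FontTest.loaded_fonts before the tests run, e.g.
#   FontTest.loaded_fonts = load_fonts(['out/*.ttf'], expected_count=18)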
class FontTest(unittest.TestCase):
"""Parent class for all font tests."""
loaded_fonts = None
class TestItalicAngle(FontTest):
"""Test the italic angle of fonts."""
def setUp(self):
_, self.fonts = self.loaded_fonts
def test_italic_angle(self):
"""Tests the italic angle of fonts to be correct."""
for font in self.fonts:
post_table = font['post']
if 'Italic' in font_data.font_name(font):
expected_angle = -12.0
else:
expected_angle = 0.0
self.assertEqual(post_table.italicAngle, expected_angle)
class TestMetaInfo(FontTest):
"""Test various meta information."""
def setUp(self):
_, self.fonts = self.loaded_fonts
def test_mac_style(self):
"""Tests the macStyle of the fonts to be correct.
Bug: https://code.google.com/a/google.com/p/roboto/issues/detail?id=8
"""
for font in self.fonts:
font_name = font_data.font_name(font)
bold = ('Bold' in font_name) or ('Black' in font_name)
italic = 'Italic' in font_name
expected_mac_style = (italic << 1) | bold
self.assertEqual(font['head'].macStyle, expected_mac_style)
def test_fs_type(self):
"""Tests the fsType of the fonts to be 0.
fsType of 0 marks the font free for installation, embedding, etc.
Bug: https://code.google.com/a/google.com/p/roboto/issues/detail?id=29
"""
for font in self.fonts:
self.assertEqual(font['OS/2'].fsType, 0)
def test_vendor_id(self):
"""Tests the vendor ID of the fonts to be 'GOOG'."""
for font in self.fonts:
self.assertEqual(font['OS/2'].achVendID, 'GOOG')
def test_us_weight(self):
"Tests the usWeight of the fonts to be correct."""
for font in self.fonts:
weight = roboto_data.extract_weight_name(font_data.font_name(font))
expected_numeric_weight = roboto_data.WEIGHTS[weight]
self.assertEqual(
font['OS/2'].usWeightClass,
expected_numeric_weight)
def test_version_numbers(self):
"Tests the two version numbers of the font to be correct."""
for font in self.fonts:
build_number = roboto_data.get_build_number()
expected_version = '2.' + build_number
version = font_data.font_version(font)
usable_part_of_version = version.split(';')[0]
self.assertEqual(usable_part_of_version,
'Version ' + expected_version)
revision = font_data.printable_font_revision(font, accuracy=5)
self.assertEqual(revision, expected_version)
class TestNames(FontTest):
"""Tests various strings in the name table."""
def setUp(self):
_, self.fonts = self.loaded_fonts
self.condensed_family_name = self.family_name + ' Condensed'
self.names = []
for font in self.fonts:
self.names.append(font_data.get_name_records(font))
def test_copyright(self):
"""Tests the copyright message."""
for records in self.names:
self.assertEqual(
records[0],
'Copyright 2011 Google Inc. All Rights Reserved.')
def test_family_name(self):
"""Tests the family name."""
for records in self.names:
self.assertIn(records[1],
[self.family_name, self.condensed_family_name])
if 16 in records:
self.assertEqual(records[16], records[1])
def test_postscript_name_for_spaces(self):
"""Tests that there are no spaces in PostScript names."""
for records in self.names:
self.assertFalse(' ' in records[6])
class TestDigitWidths(FontTest):
"""Tests the width of digits."""
def setUp(self):
self.font_files, self.fonts = self.loaded_fonts
self.digits = [
'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine']
def test_digit_widths(self):
"""Tests all decimal digits to make sure they have the same width."""
for font in self.fonts:
hmtx_table = font['hmtx']
widths = [hmtx_table[digit][0] for digit in self.digits]
self.assertEqual(len(set(widths)), 1)
def test_superscript_digits(self):
"""Tests that 'numr' features maps digits to Unicode superscripts."""
ascii_digits = '0123456789'
superscript_digits = u'⁰¹²³⁴⁵⁶⁷⁸⁹'
for font_file in self.font_files:
numr_glyphs = layout.get_advances(
ascii_digits, font_file, '--features=numr')
superscript_glyphs = layout.get_advances(
superscript_digits, font_file)
self.assertEqual(superscript_glyphs, numr_glyphs)
class TestCharacterCoverage(FontTest):
"""Tests character coverage."""
def setUp(self):
_, self.fonts = self.loaded_fonts
self.LEGACY_PUA = frozenset({0xEE01, 0xEE02, 0xF6C3})
def test_inclusion_of_legacy_pua(self):
"""Tests that legacy PUA characters remain in the fonts."""
for font in self.fonts:
charset = coverage.character_set(font)
for char in self.LEGACY_PUA:
self.assertIn(char, charset)
def test_non_inclusion_of_other_pua(self):
"""Tests that there are not other PUA characters except legacy ones."""
for font in self.fonts:
charset = coverage.character_set(font)
pua_chars = {
char for char in charset
if 0xE000 <= char <= 0xF8FF or 0xF0000 <= char <= 0x10FFFF}
self.assertTrue(pua_chars <= self.LEGACY_PUA)
def test_lack_of_unassigned_chars(self):
"""Tests that unassigned characters are not in the fonts."""
for font in self.fonts:
charset = coverage.character_set(font)
self.assertNotIn(0x2072, charset)
self.assertNotIn(0x2073, charset)
self.assertNotIn(0x208F, charset)
def test_inclusion_of_sound_recording_copyright(self):
"""Tests that sound recording copyright symbol is in the fonts."""
for font in self.fonts:
charset = coverage.character_set(font)
self.assertIn(
0x2117, charset, # SOUND RECORDING COPYRIGHT
'U+2117 not found in %s.' % font_data.font_name(font))
class TestLigatures(FontTest):
"""Tests formation or lack of formation of ligatures."""
def setUp(self):
self.fontfiles, _ = self.loaded_fonts
def test_lack_of_ff_ligature(self):
"""Tests that the ff ligature is not formed by default."""
for fontfile in self.fontfiles:
advances = layout.get_advances('ff', fontfile)
self.assertEqual(len(advances), 2)
def test_st_ligatures(self):
"""Tests that st ligatures are formed by dlig."""
for fontfile in self.fontfiles:
for combination in [u'st', u'ſt']:
normal = layout.get_glyphs(combination, fontfile)
ligated = layout.get_glyphs(
combination, fontfile, '--features=dlig')
self.assertTrue(len(normal) == 2 and len(ligated) == 1)
class TestFeatures(FontTest):
"""Tests typographic features."""
def setUp(self):
self.fontfiles, _ = self.loaded_fonts
def test_smcp_coverage(self):
"""Tests that smcp is supported for our required set."""
with open('res/smcp_requirements.txt') as smcp_reqs_file:
smcp_reqs_list = []
for line in smcp_reqs_file.readlines():
line = line[:line.index(' #')]
smcp_reqs_list.append(unichr(int(line, 16)))
for fontfile in self.fontfiles:
chars_with_no_smcp = []
for char in smcp_reqs_list:
normal = layout.get_glyphs(char, fontfile)
smcp = layout.get_glyphs(char, fontfile, '--features=smcp')
if normal == smcp:
chars_with_no_smcp.append(char)
self.assertEqual(
chars_with_no_smcp, [],
("smcp feature is not applied to '%s'" %
u''.join(chars_with_no_smcp).encode('UTF-8')))
EXPECTED_YMIN = -555
EXPECTED_YMAX = 2163
class TestVerticalMetrics(FontTest):
"""Test the vertical metrics of fonts."""
def setUp(self):
self.font_files, self.fonts = self.loaded_fonts
def test_ymin_ymax(self):
"""Tests yMin and yMax to be equal to Roboto v1 values.
Android requires this, and web fonts expect this.
"""
for font in self.fonts:
head_table = font['head']
self.assertEqual(head_table.yMin, EXPECTED_YMIN)
self.assertEqual(head_table.yMax, EXPECTED_YMAX)
def test_glyphs_ymin_ymax(self):
"""Tests yMin and yMax of all glyphs to not go outside the range."""
for font_file, font in zip(self.font_files, self.fonts):
glyf_table = font['glyf']
for glyph_name in glyf_table.glyphOrder:
try:
y_min = glyf_table[glyph_name].yMin
y_max = glyf_table[glyph_name].yMax
except AttributeError:
continue
self.assertTrue(
EXPECTED_YMIN <= y_min and y_max <= EXPECTED_YMAX,
('The vertical metrics for glyph %s in %s exceed the '
'acceptable range: yMin=%d, yMax=%d' % (
glyph_name, font_file, y_min, y_max)))
def test_hhea_table_metrics(self):
"""Tests ascent, descent, and lineGap to be equal to Roboto v1 values.
"""
for font in self.fonts:
hhea_table = font['hhea']
self.assertEqual(hhea_table.descent, -500)
self.assertEqual(hhea_table.ascent, 1900)
self.assertEqual(hhea_table.lineGap, 0)
|
apache-2.0
|
Jaemu/haiku.py
|
lib/yaml/scanner.py
|
434
|
52630
|
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from error import MarkedYAMLError
from tokens import *
class ScannerError(MarkedYAMLError):
pass
class SimpleKey(object):
# See below simple keys treatment.
def __init__(self, token_number, required, index, line, column, mark):
self.token_number = token_number
self.required = required
self.index = index
self.line = line
self.column = column
self.mark = mark
class Scanner(object):
def __init__(self):
"""Initialize the scanner."""
# It is assumed that Scanner and Reader will have a common descendant.
# Reader does the dirty work of checking for BOM and converting the
# input data to Unicode. It also adds NUL to the end.
#
# Reader supports the following methods
# self.peek(i=0) # peek the next i-th character
# self.prefix(l=1) # peek the next l characters
# self.forward(l=1) # read the next l characters and move the pointer.
# Have we reached the end of the stream?
self.done = False
# The number of unclosed '{' and '['. `flow_level == 0` means block
# context.
self.flow_level = 0
# List of processed tokens that are not yet emitted.
self.tokens = []
# Add the STREAM-START token.
self.fetch_stream_start()
# Number of tokens that were emitted through the `get_token` method.
self.tokens_taken = 0
# The current indentation level.
self.indent = -1
# Past indentation levels.
self.indents = []
# Variables related to simple keys treatment.
# A simple key is a key that is not denoted by the '?' indicator.
# Example of simple keys:
# ---
# block simple key: value
# ? not a simple key:
# : { flow simple key: value }
# We emit the KEY token before all keys, so when we find a potential
# simple key, we try to locate the corresponding ':' indicator.
# Simple keys should be limited to a single line and 1024 characters.
# Can a simple key start at the current position? A simple key may
# start:
# - at the beginning of the line, not counting indentation spaces
# (in block context),
# - after '{', '[', ',' (in the flow context),
# - after '?', ':', '-' (in the block context).
# In the block context, this flag also signifies if a block collection
# may start at the current position.
self.allow_simple_key = True
# Keep track of possible simple keys. This is a dictionary. The key
# is `flow_level`; there can be no more than one possible simple key
# for each level. The value is a SimpleKey record:
# (token_number, required, index, line, column, mark)
# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
# '[', or '{' tokens.
self.possible_simple_keys = {}
# Public methods.
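# Usage sketch (assumes a concrete class that mixes Scanner with a Reader):
#   while not scanner.check_token(StreamEndToken):
#       token = scanner.get_token()
# check_token() tests the type of the next token, peek_token() returns it
# without consuming it, and get_token() removes it from the queue and
# returns it.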
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete it from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
def need_more_tokens(self):
if self.done:
return False
if not self.tokens:
return True
# The current token may be a potential simple key, so we
# need to look further.
self.stale_possible_simple_keys()
if self.next_possible_simple_key() == self.tokens_taken:
return True
def fetch_more_tokens(self):
# Eat whitespaces and comments until we reach the next token.
self.scan_to_next_token()
# Remove obsolete possible simple keys.
self.stale_possible_simple_keys()
# Compare the current indentation and column. It may add some tokens
# and decrease the current indentation level.
self.unwind_indent(self.column)
# Peek the next character.
ch = self.peek()
# Is it the end of stream?
if ch == u'\0':
return self.fetch_stream_end()
# Is it a directive?
if ch == u'%' and self.check_directive():
return self.fetch_directive()
# Is it the document start?
if ch == u'-' and self.check_document_start():
return self.fetch_document_start()
# Is it the document end?
if ch == u'.' and self.check_document_end():
return self.fetch_document_end()
# TODO: support for BOM within a stream.
#if ch == u'\uFEFF':
# return self.fetch_bom() <-- issue BOMToken
# Note: the order of the following checks is NOT significant.
# Is it the flow sequence start indicator?
if ch == u'[':
return self.fetch_flow_sequence_start()
# Is it the flow mapping start indicator?
if ch == u'{':
return self.fetch_flow_mapping_start()
# Is it the flow sequence end indicator?
if ch == u']':
return self.fetch_flow_sequence_end()
# Is it the flow mapping end indicator?
if ch == u'}':
return self.fetch_flow_mapping_end()
# Is it the flow entry indicator?
if ch == u',':
return self.fetch_flow_entry()
# Is it the block entry indicator?
if ch == u'-' and self.check_block_entry():
return self.fetch_block_entry()
# Is it the key indicator?
if ch == u'?' and self.check_key():
return self.fetch_key()
# Is it the value indicator?
if ch == u':' and self.check_value():
return self.fetch_value()
# Is it an alias?
if ch == u'*':
return self.fetch_alias()
# Is it an anchor?
if ch == u'&':
return self.fetch_anchor()
# Is it a tag?
if ch == u'!':
return self.fetch_tag()
# Is it a literal scalar?
if ch == u'|' and not self.flow_level:
return self.fetch_literal()
# Is it a folded scalar?
if ch == u'>' and not self.flow_level:
return self.fetch_folded()
# Is it a single quoted scalar?
if ch == u'\'':
return self.fetch_single()
# Is it a double quoted scalar?
if ch == u'\"':
return self.fetch_double()
# It must be a plain scalar then.
if self.check_plain():
return self.fetch_plain()
# No? It's an error. Let's produce a nice error message.
raise ScannerError("while scanning for the next token", None,
"found character %r that cannot start any token"
% ch.encode('utf-8'), self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
def stale_possible_simple_keys(self):
# Remove entries that are no longer possible simple keys. According to
# the YAML specification, simple keys
# - should be limited to a single line,
# - should be no longer than 1024 characters.
# Disabling this procedure will allow simple keys of any length and
# height (may cause problems if indentation is broken though).
for level in self.possible_simple_keys.keys():
key = self.possible_simple_keys[level]
if key.line != self.line \
or self.index-key.index > 1024:
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
# The next token may start a simple key. We check if it's possible
# and save its position. This function is called for
# ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
# Check if a simple key is required at the current position.
required = not self.flow_level and self.indent == self.column
# A simple key is required only if it is the first token in the current
# line. Therefore it is always allowed.
assert self.allow_simple_key or not required
# The next token might be a simple key. Let's save its number and
# position.
if self.allow_simple_key:
self.remove_possible_simple_key()
token_number = self.tokens_taken+len(self.tokens)
key = SimpleKey(token_number, required,
self.index, self.line, self.column, self.get_mark())
self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
def unwind_indent(self, column):
## In flow context, tokens should respect indentation.
## Actually the condition should be `self.indent >= column` according to
## the spec. But this condition will prohibit intuitively correct
## constructions such as
## key : {
## }
#if self.flow_level and self.indent > column:
# raise ScannerError(None, None,
# "invalid intendation or unclosed '[' or '{'",
# self.get_mark())
# In the flow context, indentation is ignored. We make the scanner less
# restrictive than the specification requires.
if self.flow_level:
return
# In block context, we may need to issue the BLOCK-END tokens.
while self.indent > column:
mark = self.get_mark()
self.indent = self.indents.pop()
self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
def fetch_stream_start(self):
# We always add STREAM-START as the first token and STREAM-END as the
# last token.
# Read the token.
mark = self.get_mark()
# Add STREAM-START.
self.tokens.append(StreamStartToken(mark, mark,
encoding=self.encoding))
def fetch_stream_end(self):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
self.possible_simple_keys = {}
# Read the token.
mark = self.get_mark()
# Add STREAM-END.
self.tokens.append(StreamEndToken(mark, mark))
# The stream is finished.
self.done = True
def fetch_directive(self):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Scan and add DIRECTIVE.
self.tokens.append(self.scan_directive())
def fetch_document_start(self):
self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys. Note that there cannot be a block collection
# after '---'.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Add DOCUMENT-START or DOCUMENT-END.
start_mark = self.get_mark()
self.forward(3)
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
# '[' and '{' may start a simple key.
self.save_possible_simple_key()
# Increase the flow level.
self.flow_level += 1
# Simple keys are allowed after '[' and '{'.
self.allow_simple_key = True
# Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Decrease the flow level.
self.flow_level -= 1
# No simple keys after ']' or '}'.
self.allow_simple_key = False
# Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
# Simple keys are allowed after ','.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add FLOW-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
def fetch_block_entry(self):
# Block context needs additional checks.
if not self.flow_level:
# Are we allowed to start a new entry?
if not self.allow_simple_key:
raise ScannerError(None, None,
"sequence entries are not allowed here",
self.get_mark())
# We may need to add BLOCK-SEQUENCE-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockSequenceStartToken(mark, mark))
# It's an error for the block entry to occur in the flow context,
# but we let the parser detect this.
else:
pass
# Simple keys are allowed after '-'.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add BLOCK-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(BlockEntryToken(start_mark, end_mark))
def fetch_key(self):
# Block context needs additional checks.
if not self.flow_level:
# Are we allowed to start a key (not necessarily a simple one)?
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping keys are not allowed here",
self.get_mark())
# We may need to add BLOCK-MAPPING-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after '?' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add KEY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(KeyToken(start_mark, end_mark))
def fetch_value(self):
# Is there a pending simple key that this ':' completes?
if self.flow_level in self.possible_simple_keys:
# Add KEY.
key = self.possible_simple_keys[self.flow_level]
del self.possible_simple_keys[self.flow_level]
self.tokens.insert(key.token_number-self.tokens_taken,
KeyToken(key.mark, key.mark))
# If this key starts a new block mapping, we need to add
# BLOCK-MAPPING-START.
if not self.flow_level:
if self.add_indent(key.column):
self.tokens.insert(key.token_number-self.tokens_taken,
BlockMappingStartToken(key.mark, key.mark))
# There cannot be two simple keys one after another.
self.allow_simple_key = False
# It must be a part of a complex key.
else:
# Block context needs additional checks.
# (Do we really need them? They will be caught by the parser
# anyway.)
if not self.flow_level:
# We are allowed to start a complex value if and only if
# we can start a simple key.
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping values are not allowed here",
self.get_mark())
# If this value starts a new block mapping, we need to add
# BLOCK-MAPPING-START. It will be detected as an error later by
# the parser.
if not self.flow_level:
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after ':' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add VALUE.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
# ALIAS could be a simple key.
self.save_possible_simple_key()
# No simple keys after ALIAS.
self.allow_simple_key = False
# Scan and add ALIAS.
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
# ANCHOR could start a simple key.
self.save_possible_simple_key()
# No simple keys after ANCHOR.
self.allow_simple_key = False
# Scan and add ANCHOR.
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
# TAG could start a simple key.
self.save_possible_simple_key()
# No simple keys after TAG.
self.allow_simple_key = False
# Scan and add TAG.
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
# A simple key may follow a block scalar.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Scan and add SCALAR.
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style='\'')
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
# A flow scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after flow scalars.
self.allow_simple_key = False
# Scan and add SCALAR.
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == u'---' \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == u'...' \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_plain(self):
# A plain scalar may start with any non-space character except:
# '-', '?', ':', ',', '[', ']', '{', '}',
# '#', '&', '*', '!', '|', '>', '\'', '\"',
# '%', '@', '`'.
#
# It may also start with
# '-', '?', ':'
# if it is followed by a non-space character.
#
# Note that we limit the last rule to the block context (except the
# '-' character) because we want the flow context to be space
# independent.
ch = self.peek()
return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
and (ch == u'-' or (not self.flow_level and ch in u'?:')))
# Scanners.
def scan_to_next_token(self):
# We ignore spaces, line breaks and comments.
# If we find a line break in the block context, we set the flag
# `allow_simple_key` on.
# The byte order mark is stripped if it's the first character in the
# stream. We do not yet support BOM inside the stream as the
# specification requires. Any such mark will be considered as a part
# of the document.
#
# TODO: We need to make tab handling rules more sane. A good rule is
# Tabs cannot precede tokens
# BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
# KEY(block), VALUE(block), BLOCK-ENTRY
# So the checking code is
# if <TAB>:
# self.allow_simple_keys = False
# We also need to add the check for `allow_simple_keys == True` to
# `unwind_indent` before issuing BLOCK-END.
# Scanners for block, flow, and plain scalars need to be modified.
if self.index == 0 and self.peek() == u'\uFEFF':
self.forward()
found = False
while not found:
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
if self.scan_line_break():
if not self.flow_level:
self.allow_simple_key = True
else:
found = True
def scan_directive(self):
# See the specification for details.
start_mark = self.get_mark()
self.forward()
name = self.scan_directive_name(start_mark)
value = None
if name == u'YAML':
value = self.scan_yaml_directive_value(start_mark)
end_mark = self.get_mark()
elif name == u'TAG':
value = self.scan_tag_directive_value(start_mark)
end_mark = self.get_mark()
else:
end_mark = self.get_mark()
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
self.scan_directive_ignored_line(start_mark)
return DirectiveToken(name, value, start_mark, end_mark)
def scan_directive_name(self, start_mark):
# See the specification for details.
length = 0
ch = self.peek(length)
while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
if not length:
raise ScannerError("while scanning a directive", start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
return value
def scan_yaml_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
major = self.scan_yaml_directive_number(start_mark)
if self.peek() != '.':
raise ScannerError("while scanning a directive", start_mark,
"expected a digit or '.', but found %r"
% self.peek().encode('utf-8'),
self.get_mark())
self.forward()
minor = self.scan_yaml_directive_number(start_mark)
if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a digit or ' ', but found %r"
% self.peek().encode('utf-8'),
self.get_mark())
return (major, minor)
def scan_yaml_directive_number(self, start_mark):
# See the specification for details.
ch = self.peek()
if not (u'0' <= ch <= u'9'):
raise ScannerError("while scanning a directive", start_mark,
"expected a digit, but found %r" % ch.encode('utf-8'),
self.get_mark())
length = 0
while u'0' <= self.peek(length) <= u'9':
length += 1
value = int(self.prefix(length))
self.forward(length)
return value
def scan_tag_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
handle = self.scan_tag_directive_handle(start_mark)
while self.peek() == u' ':
self.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
def scan_tag_directive_handle(self, start_mark):
# See the specification for details.
value = self.scan_tag_handle('directive', start_mark)
ch = self.peek()
if ch != u' ':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
return value
def scan_tag_directive_prefix(self, start_mark):
# See the specification for details.
value = self.scan_tag_uri('directive', start_mark)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
return value
def scan_directive_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in u'\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a comment or a line break, but found %r"
% ch.encode('utf-8'), self.get_mark())
self.scan_line_break()
def scan_anchor(self, TokenClass):
# The specification does not restrict characters for anchors and
# aliases. This may lead to problems, for instance, the document:
# [ *alias, value ]
# can be interpreted in two ways, as
# [ "value" ]
# and
# [ *alias , "value" ]
# Therefore we restrict aliases to numbers and ASCII letters.
start_mark = self.get_mark()
indicator = self.peek()
if indicator == u'*':
name = 'alias'
else:
name = 'anchor'
self.forward()
length = 0
ch = self.peek(length)
while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
if not length:
raise ScannerError("while scanning an %s" % name, start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
raise ScannerError("while scanning an %s" % name, start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
end_mark = self.get_mark()
return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
# See the specification for details.
start_mark = self.get_mark()
ch = self.peek(1)
if ch == u'<':
handle = None
self.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
if self.peek() != u'>':
raise ScannerError("while parsing a tag", start_mark,
"expected '>', but found %r" % self.peek().encode('utf-8'),
self.get_mark())
self.forward()
elif ch in u'\0 \t\r\n\x85\u2028\u2029':
handle = None
suffix = u'!'
self.forward()
else:
length = 1
use_handle = False
while ch not in u'\0 \r\n\x85\u2028\u2029':
if ch == u'!':
use_handle = True
break
length += 1
ch = self.peek(length)
handle = u'!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
handle = u'!'
self.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a tag", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
value = (handle, suffix)
end_mark = self.get_mark()
return TagToken(value, start_mark, end_mark)
def scan_block_scalar(self, style):
# See the specification for details.
if style == '>':
folded = True
else:
folded = False
chunks = []
start_mark = self.get_mark()
# Scan the header.
self.forward()
chomping, increment = self.scan_block_scalar_indicators(start_mark)
self.scan_block_scalar_ignored_line(start_mark)
# Determine the indentation level and go to the first non-empty line.
min_indent = self.indent+1
if min_indent < 1:
min_indent = 1
if increment is None:
breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
indent = max(min_indent, max_indent)
else:
indent = min_indent+increment-1
breaks, end_mark = self.scan_block_scalar_breaks(indent)
line_break = u''
# Scan the inner part of the block scalar.
while self.column == indent and self.peek() != u'\0':
chunks.extend(breaks)
leading_non_space = self.peek() not in u' \t'
length = 0
while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
length += 1
chunks.append(self.prefix(length))
self.forward(length)
line_break = self.scan_line_break()
breaks, end_mark = self.scan_block_scalar_breaks(indent)
if self.column == indent and self.peek() != u'\0':
# Unfortunately, folding rules are ambiguous.
#
# This is the folding according to the specification:
if folded and line_break == u'\n' \
and leading_non_space and self.peek() not in u' \t':
if not breaks:
chunks.append(u' ')
else:
chunks.append(line_break)
# This is Clark Evans's interpretation (also in the spec
# examples):
#
#if folded and line_break == u'\n':
# if not breaks:
# if self.peek() not in ' \t':
# chunks.append(u' ')
# else:
# chunks.append(line_break)
#else:
# chunks.append(line_break)
else:
break
# Chomp the tail.
if chomping is not False:
chunks.append(line_break)
if chomping is True:
chunks.extend(breaks)
# We are done.
return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
style)
def scan_block_scalar_indicators(self, start_mark):
# See the specification for details.
chomping = None
increment = None
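# The chomping indicator ('+'/'-') and the indentation indicator (a digit
# 1-9) may appear in either order after '|' or '>', so both orders are
# handled below.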
ch = self.peek()
if ch in u'+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch in u'0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
elif ch in u'0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
ch = self.peek()
if ch in u'+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected chomping or indentation indicators, but found %r"
% ch.encode('utf-8'), self.get_mark())
return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in u'\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected a comment or a line break, but found %r"
% ch.encode('utf-8'), self.get_mark())
self.scan_line_break()
def scan_block_scalar_indentation(self):
# See the specification for details.
chunks = []
max_indent = 0
end_mark = self.get_mark()
while self.peek() in u' \r\n\x85\u2028\u2029':
if self.peek() != u' ':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if self.column > max_indent:
max_indent = self.column
return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
# See the specification for details.
chunks = []
end_mark = self.get_mark()
while self.column < indent and self.peek() == u' ':
self.forward()
while self.peek() in u'\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while self.column < indent and self.peek() == u' ':
self.forward()
return chunks, end_mark
def scan_flow_scalar(self, style):
# See the specification for details.
# Note that we loosen indentation rules for quoted scalars. Quoted
# scalars don't need to adhere to indentation because " and ' clearly
# mark the beginning and the end of them. Therefore we are less
# restrictive than the specification requires. We only need to check
# that document separators are not included in scalars.
if style == '"':
double = True
else:
double = False
chunks = []
start_mark = self.get_mark()
quote = self.peek()
self.forward()
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
while self.peek() != quote:
chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.forward()
end_mark = self.get_mark()
return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
style)
ESCAPE_REPLACEMENTS = {
u'0': u'\0',
u'a': u'\x07',
u'b': u'\x08',
u't': u'\x09',
u'\t': u'\x09',
u'n': u'\x0A',
u'v': u'\x0B',
u'f': u'\x0C',
u'r': u'\x0D',
u'e': u'\x1B',
u' ': u'\x20',
u'\"': u'\"',
u'\\': u'\\',
u'N': u'\x85',
u'_': u'\xA0',
u'L': u'\u2028',
u'P': u'\u2029',
}
ESCAPE_CODES = {
u'x': 2,
u'u': 4,
u'U': 8,
}
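# ESCAPE_CODES maps an escape letter to the number of hexadecimal digits that
# follow it (\xXX, \uXXXX, \UXXXXXXXX).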
def scan_flow_scalar_non_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
length = 0
while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
length += 1
if length:
chunks.append(self.prefix(length))
self.forward(length)
ch = self.peek()
if not double and ch == u'\'' and self.peek(1) == u'\'':
chunks.append(u'\'')
self.forward(2)
elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
chunks.append(ch)
self.forward()
elif double and ch == u'\\':
self.forward()
ch = self.peek()
if ch in self.ESCAPE_REPLACEMENTS:
chunks.append(self.ESCAPE_REPLACEMENTS[ch])
self.forward()
elif ch in self.ESCAPE_CODES:
length = self.ESCAPE_CODES[ch]
self.forward()
for k in range(length):
if self.peek(k) not in u'0123456789ABCDEFabcdef':
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"expected escape sequence of %d hexdecimal numbers, but found %r" %
(length, self.peek(k).encode('utf-8')), self.get_mark())
code = int(self.prefix(length), 16)
chunks.append(unichr(code))
self.forward(length)
elif ch in u'\r\n\x85\u2028\u2029':
self.scan_line_break()
chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
else:
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
else:
return chunks
def scan_flow_scalar_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
length = 0
while self.peek(length) in u' \t':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch == u'\0':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected end of stream", self.get_mark())
elif ch in u'\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
breaks = self.scan_flow_scalar_breaks(double, start_mark)
if line_break != u'\n':
chunks.append(line_break)
elif not breaks:
chunks.append(u' ')
chunks.extend(breaks)
else:
chunks.append(whitespaces)
return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
# Instead of checking indentation, we check for document
# separators.
prefix = self.prefix(3)
if (prefix == u'---' or prefix == u'...') \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected document separator", self.get_mark())
while self.peek() in u' \t':
self.forward()
if self.peek() in u'\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
else:
return chunks
def scan_plain(self):
# See the specification for details.
# We add an additional restriction for the flow context:
# plain scalars in the flow context cannot contain ',', ':' and '?'.
# We also keep track of the `allow_simple_key` flag here.
# Indentation rules are loosened for the flow context.
chunks = []
start_mark = self.get_mark()
end_mark = start_mark
indent = self.indent+1
# We allow zero indentation for scalars, but then we need to check for
# document separators at the beginning of the line.
#if indent == 0:
# indent = 1
spaces = []
while True:
length = 0
if self.peek() == u'#':
break
while True:
ch = self.peek(length)
if ch in u'\0 \t\r\n\x85\u2028\u2029' \
or (not self.flow_level and ch == u':' and
self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
or (self.flow_level and ch in u',:?[]{}'):
break
length += 1
# It's not clear what we should do with ':' in the flow context.
if (self.flow_level and ch == u':'
and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
self.forward(length)
raise ScannerError("while scanning a plain scalar", start_mark,
"found unexpected ':'", self.get_mark(),
"Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
if length == 0:
break
self.allow_simple_key = False
chunks.extend(spaces)
chunks.append(self.prefix(length))
self.forward(length)
end_mark = self.get_mark()
spaces = self.scan_plain_spaces(indent, start_mark)
if not spaces or self.peek() == u'#' \
or (not self.flow_level and self.column < indent):
break
return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
def scan_plain_spaces(self, indent, start_mark):
# See the specification for details.
# The specification is really confusing about tabs in plain scalars.
# We just forbid them completely. Do not use tabs in YAML!
chunks = []
length = 0
while self.peek(length) in u' ':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch in u'\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
self.allow_simple_key = True
prefix = self.prefix(3)
if (prefix == u'---' or prefix == u'...') \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return
breaks = []
while self.peek() in u' \r\n\x85\u2028\u2029':
if self.peek() == ' ':
self.forward()
else:
breaks.append(self.scan_line_break())
prefix = self.prefix(3)
if (prefix == u'---' or prefix == u'...') \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return
if line_break != u'\n':
chunks.append(line_break)
elif not breaks:
chunks.append(u' ')
chunks.extend(breaks)
elif whitespaces:
chunks.append(whitespaces)
return chunks
def scan_tag_handle(self, name, start_mark):
# See the specification for details.
# For some strange reason, the specification does not allow '_' in
# tag handles. I have allowed it anyway.
ch = self.peek()
if ch != u'!':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch.encode('utf-8'),
self.get_mark())
length = 1
ch = self.peek(length)
if ch != u' ':
while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
if ch != u'!':
self.forward(length)
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch.encode('utf-8'),
self.get_mark())
length += 1
value = self.prefix(length)
self.forward(length)
return value
def scan_tag_uri(self, name, start_mark):
# See the specification for details.
# Note: we do not check if URI is well-formed.
chunks = []
length = 0
ch = self.peek(length)
while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
if ch == u'%':
chunks.append(self.prefix(length))
self.forward(length)
length = 0
chunks.append(self.scan_uri_escapes(name, start_mark))
else:
length += 1
ch = self.peek(length)
if length:
chunks.append(self.prefix(length))
self.forward(length)
length = 0
if not chunks:
raise ScannerError("while parsing a %s" % name, start_mark,
"expected URI, but found %r" % ch.encode('utf-8'),
self.get_mark())
return u''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
# See the specification for details.
bytes = []
mark = self.get_mark()
while self.peek() == u'%':
self.forward()
for k in range(2):
if self.peek(k) not in u'0123456789ABCDEFabcdef':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
(self.peek(k).encode('utf-8')), self.get_mark())
bytes.append(chr(int(self.prefix(2), 16)))
self.forward(2)
try:
value = unicode(''.join(bytes), 'utf-8')
except UnicodeDecodeError, exc:
raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
return value
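    # Illustrative example (added; not in the original source): the escape sequence
    # '%C3%A9' inside a tag URI decodes to the single character u'\xe9' ('é').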
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in u'\r\n\x85':
if self.prefix(2) == u'\r\n':
self.forward(2)
else:
self.forward()
return u'\n'
elif ch in u'\u2028\u2029':
self.forward()
return ch
return u''
#try:
# import psyco
# psyco.bind(Scanner)
#except ImportError:
# pass
|
apache-2.0
|
pilou-/ansible
|
lib/ansible/modules/files/template.py
|
12
|
6773
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: template
version_added: historical
short_description: Template a file out to a remote server
description:
- Templates are processed by the L(Jinja2 templating language,http://jinja.pocoo.org/docs/).
- Documentation on the template formatting can be found in the
L(Template Designer Documentation,http://jinja.pocoo.org/docs/templates/).
- Additional variables listed below can be used in templates.
- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
- C(template_host) contains the node name of the template's machine.
- C(template_uid) is the numeric user id of the owner.
- C(template_path) is the path of the template.
- C(template_fullpath) is the absolute path of the template.
- C(template_destpath) is the path of the template on the remote system (added in 2.8).
- C(template_run_date) is the date that the template was rendered.
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller.
- This can be a relative or an absolute path.
type: path
required: yes
dest:
description:
- Location to render the template to on the remote machine.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
type: str
choices: [ '\n', '\r', '\r\n' ]
default: '\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
type: str
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
type: str
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
type: str
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
type: str
default: '}}'
version_added: '2.4'
trim_blocks:
description:
- Determine when newlines should be removed from blocks.
- When set to C(yes) the first newline after a block is removed (block, not variable tag!).
type: bool
default: yes
version_added: '2.4'
lstrip_blocks:
description:
- Determine when leading spaces and tabs should be stripped.
- When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block.
- This functionality requires Jinja 2.7 or newer.
type: bool
default: no
version_added: '2.6'
force:
description:
- Determine when the file is being transferred if the destination already exists.
- When set to C(yes), replace the remote file when contents are different than the source.
- When set to C(no), the file will only be transferred if the destination does not exist.
type: bool
default: yes
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
output_encoding:
description:
- Overrides the encoding used to write the template file defined by C(dest).
- It defaults to C(utf-8), but any encoding supported by python can be used.
- The source template file must always be encoded using C(utf-8), for homogeneity.
type: str
default: utf-8
version_added: '2.7'
notes:
- Including a string that uses a date in the template will result in the template being marked 'changed' each time.
- Since Ansible 0.9, templates are loaded with C(trim_blocks=True).
- >
Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to C([% var %]) instead of C({{ var }}).
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
- Using raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively
evaluated.
- You can use the M(copy) module with the C(content:) option if you prefer the template inline,
as part of the playbook.
- For Windows you can use M(win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: copy
- module: win_copy
- module: win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/files.conf
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
template:
src: named.conf_{{ ansible_os_family}}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: 0640
- name: Create a DOS-style text file from a template
template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
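# Illustrative note (added; not part of the original module source): the special
# jinja2 override header described in the notes above goes on the first line of the
# template file itself, not in the playbook. A hypothetical template could start with:
#
#   #jinja2:variable_start_string:'[%', variable_end_string:'%]'
#   server_name [% inventory_hostname %];
#
# after which '[% ... %]' is interpolated instead of '{{ ... }}' for that file only.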
|
gpl-3.0
|
hjarmstrong/Odme-plusplus
|
3rd/build/tools/build/v2/test/core_variables_in_actions.py
|
45
|
1241
|
#!/usr/bin/python
# Copyright 2012. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that variables in actions get expanded but double quote characters
# get treated as regular characters and not string literal delimiters when
# determining string tokens concatenated to the variable being expanded.
#
# We also take care to make this test work correctly when run using both
# Windows and Unix echo command variant. That is why we add the extra single
# quotes around the text being echoed - they will make the double quotes be
# displayed as regular characters in both cases but will be displayed
# themselves only when using the Windows cmd shell's echo command.
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
rule dummy ( i )
{
local a = 1 2 3 ;
ECHO From rule: $(a)" seconds" ;
a on $(i) = $(a) ;
}
actions dummy
{
echo 'From action: $(a)" seconds"'
}
dummy all ;
""")
t.run_build_system(["-ffile.jam", "-d1"])
t.expect_output_lines("From rule: 1 seconds 2 seconds 3 seconds")
t.expect_output_lines('*From action: 1" 2" 3" seconds"*')
t.cleanup()
|
mit
|
johnshiver/meetme
|
meet-me/users/admin.py
|
183
|
1048
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class MyUserCreationForm(UserCreationForm):
error_message = UserCreationForm.error_messages.update({
'duplicate_username': 'This username has already been taken.'
})
class Meta(UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
form = MyUserChangeForm
add_form = MyUserCreationForm
|
bsd-3-clause
|
cbclab/MDT
|
mdt/lib/components.py
|
1
|
14160
|
import logging
from importlib.machinery import SourceFileLoader
import inspect
import os
from collections import defaultdict
from contextlib import contextmanager
import mdt
import mot
from mdt.configuration import get_config_dir
from mdt.model_building.signal_noise_models import SignalNoiseModel
from mot.library_functions import CLLibrary
from mdt.model_building.likelihood_functions import LikelihoodFunction
from mdt.model_building.parameter_functions.transformations import AbstractTransformation
__author__ = 'Robbert Harms'
__date__ = '2018-03-22'
__maintainer__ = 'Robbert Harms'
__email__ = '[email protected]'
__licence__ = 'LGPL v3'
supported_component_types = ('batch_profiles', 'compartment_models', 'composite_models',
'library_functions', 'parameters', 'likelihood_functions',
'signal_noise_functions', 'parameter_transforms')
class _ComponentLibrary:
def __init__(self):
"""Holds the reference to all defined components, by component type and by name.
For each component type several components may be defined with different or equal names. If the names are equal
they are added to a stack and only the last element is returned. Components may also be removed again from
the stack (in a random access method).
"""
self._library = {}
self._mutation_history = []
self.reset()
def get_current_history_length(self):
"""Get the current length of the history. Useful for undoing history changes."""
return len(self._mutation_history)
def undo_history_until(self, history_ind):
"""Pop the history stack until we are at the given index (length).
Args:
history_ind (int): the desired length of the history stack
"""
if history_ind < len(self._mutation_history):
for ind in range(len(self._mutation_history) - history_ind):
item = self._mutation_history.pop()
if item.action == 'add':
self._library[item.component_type][item.name].pop()
if item.action == 'remove':
self._library[item.component_type][item.name].append(item.adapter)
def reset(self):
"""Clear the library by removing all available components.
This also resets the mutation history.
"""
self._library = {component_type: defaultdict(list) for component_type in supported_component_types}
self._mutation_history = []
def add_component(self, component_type, name, component_class, meta_info=None):
"""Adds a component class to the library.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
name (str): the name of the component
component_class (class): the class or constructor function for the component
meta_info (dict): a dictionary with meta information about the component
"""
adapter = _DirectComponent(component_class, meta_info=meta_info)
self._library[component_type][name].append(adapter)
self._mutation_history.append(_LibraryHistoryDelta('add', component_type, name, adapter))
def add_template_component(self, template):
"""Adds a component template to the library.
Args:
template (mdt.component_templates.base.ComponentTemplateMeta): the template for constructing the component
class.
"""
adapter = _ComponentFromTemplate(template)
self._library[template.component_type][template.name].append(adapter)
self._mutation_history.append(_LibraryHistoryDelta('add', template.component_type, template.name, adapter))
def get_component(self, component_type, name):
"""Get the component class for the component of the given type and name.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
name (str): the name of the component
Returns:
class: the component class.
"""
if not self.has_component(component_type, name):
raise ValueError('Can not find a component of type "{}" with name "{}"'.format(component_type, name))
return self._library[component_type][name][-1].get_component()
def get_meta_info(self, component_type, name):
"""Get the meta information dictionary for the component of the given type and name.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
name (str): the name of the component
Returns:
dict: the meta information
"""
if not self.has_component(component_type, name):
raise ValueError('Can not find a component of type "{}" with name "{}"'.format(component_type, name))
return self._library[component_type][name][-1].get_meta_info()
def get_component_list(self, component_type):
"""Get a list of available components by component type.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
Returns:
list of str: list of available components
"""
return list(self._library[component_type].keys())
def has_component(self, component_type, name):
"""Check if a component is available.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
name (str): the name of the component
Returns:
boolean: if we have a component available of the given type and given name.
"""
return name in self._library[component_type] and len(self._library[component_type][name])
def get_template(self, component_type, name):
"""Get the template class for the given component.
This may not be supported for all component types and components. That is, since components can either be
added as classes or as templates, we can not guarantee a template class for any requested component.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
name (str): the name of the component
Returns:
mdt.component_templates.base.ComponentTemplateMeta: a template class if possible.
Raises:
ValueError: if no component of the given name could be found.
"""
if not self.has_component(component_type, name):
            raise ValueError('The component with the name "{}" '
                             'of type "{}" could not be found.'.format(name, component_type))
return self._library[component_type][name][-1].get_template()
def remove_last_entry(self, component_type, name):
"""Removes the last entry of the given component.
Args:
component_type (str): the type of the component, see ``supported_component_types``.
name (str): the name of the component
"""
adapter = self._library[component_type][name].pop()
if not len(self._library[component_type][name]):
del self._library[component_type][name]
self._mutation_history.append(_LibraryHistoryDelta('remove', component_type, name, adapter))
class _ComponentAdapter:
def get_component(self):
"""Build or return the actual component class.
        Since the component library supports both ``component classes`` and ``template classes`` we need an adapter class
to build the actual component if necessary.
Returns:
class: the component class
"""
raise NotImplementedError()
def get_meta_info(self):
"""Get the meta info of this component
Returns:
dict: the meta info
"""
raise NotImplementedError()
def get_template(self):
"""If supported, gets the template of this component.
Returns:
mdt.component_templates.base.ComponentTemplateMeta: a template class if possible.
"""
raise NotImplementedError()
class _DirectComponent(_ComponentAdapter):
def __init__(self, component, meta_info=None):
self.component = component
self.meta_info = meta_info or {}
def get_component(self):
return self.component
def get_meta_info(self):
return self.meta_info
def get_template(self):
raise ValueError('Can not build template from component class.')
class _ComponentFromTemplate(_ComponentAdapter):
def __init__(self, template):
self.template = template
def get_component(self):
return self.template()
def get_meta_info(self):
return self.template.meta_info()
def get_template(self):
return self.template
class _LibraryHistoryDelta:
def __init__(self, action, component_type, name, adapter):
"""Representation of a history change in the component library.
Args:
action (str): one of ``remove`` or ``add``.
component_type (str): the type of the component
name (str): the name of the component.
adapter (_ComponentAdapter): the adapter instance
"""
self.component_type = component_type
self.name = name
self.adapter = adapter
self.action = action
component_library = _ComponentLibrary()
def _add_doc(value):
"""Add a docstring to the given value."""
def _doc(func):
func.__doc__ = value
return func
return _doc
@_add_doc(_ComponentLibrary.add_component.__doc__)
def add_component(component_type, name, cls, meta_info=None):
return component_library.add_component(component_type, name, cls, meta_info)
@_add_doc(_ComponentLibrary.add_template_component.__doc__)
def add_template_component(template):
return component_library.add_template_component(template)
@_add_doc(_ComponentLibrary.get_template.__doc__)
def get_template(component_type, name):
return component_library.get_template(component_type, name)
@_add_doc(_ComponentLibrary.get_component.__doc__)
def get_component(component_type, name):
return component_library.get_component(component_type, name)
@_add_doc(_ComponentLibrary.has_component.__doc__)
def has_component(component_type, name):
return component_library.has_component(component_type, name)
@_add_doc(_ComponentLibrary.get_component_list.__doc__)
def get_component_list(component_type):
return component_library.get_component_list(component_type)
@_add_doc(_ComponentLibrary.get_meta_info.__doc__)
def get_meta_info(component_type, name):
return component_library.get_meta_info(component_type, name)
@_add_doc(_ComponentLibrary.remove_last_entry.__doc__)
def remove_last_entry(component_type, name):
return component_library.remove_last_entry(component_type, name)
@contextmanager
def temporary_component_updates():
"""Creates a context that keeps track of the component mutations and undoes them when the context exits.
This can be useful to temporarily add or remove some components from the library.
"""
history_ind = component_library.get_current_history_length()
yield
component_library.undo_history_until(history_ind)
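def _example_temporary_component_updates():
    """Illustrative sketch (added; not part of the original module) of how the context
    manager above can be used. ``_DummyProfile`` is a hypothetical stand-in class."""
    class _DummyProfile:
        pass
    with temporary_component_updates():
        add_component('batch_profiles', '_DummyProfile', _DummyProfile)
        assert has_component('batch_profiles', '_DummyProfile')
    # leaving the context rolls the temporary addition back again
    assert not has_component('batch_profiles', '_DummyProfile')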
def reload():
"""Clear the component library and reload all default components.
This will load the components from the user home folder and from the MOT library.
"""
component_library.reset()
_load_mot_components()
_load_home_folder()
def get_model(model_name):
"""Load the class of one of the available models.
Args:
model_name (str): One of the models from the composite models
Returns:
class: A composite model.
"""
try:
return component_library.get_component('composite_models', model_name)
except ValueError:
raise ValueError('The model with the name "{}" could not be found.'.format(model_name))
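# Example (added; illustrative only, the model name is an assumption):
#     model = get_model('BallStick_r1')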
def get_batch_profile(batch_profile):
"""Load the class of one of the available batch profiles
Args:
batch_profile (str): The name of the batch profile class to load
Returns:
cls: the batch profile class
"""
return component_library.get_component('batch_profiles', batch_profile)
def _load_mot_components():
"""Load all the components from MOT."""
items = [
(mot.library_functions, CLLibrary, 'library_functions'),
(mdt.model_building.likelihood_functions, LikelihoodFunction, 'likelihood_functions'),
(mdt.model_building.signal_noise_models, SignalNoiseModel, 'signal_noise_functions'),
(mdt.model_building.parameter_functions.transformations, AbstractTransformation, 'parameter_transforms'),
]
for module_obj, class_type, component_type in items:
module_items = inspect.getmembers(module_obj, lambda cls: inspect.isclass(cls) and issubclass(cls, class_type))
for item in [x[0] for x in module_items if x[0] != class_type.__name__]:
add_component(component_type, item, getattr(module_obj, item))
def _load_home_folder():
"""Load the components from the MDT home folder.
This first loads all components from the ``standard`` folder and next all those from the ``user`` folder.
"""
for user_type in ['standard', 'user']:
base_path = os.path.join(get_config_dir(), 'components', user_type)
for path, sub_dirs, files in os.walk(base_path):
for file in files:
if file.endswith('.py') and not file.startswith('__'):
full_path = os.path.join(path, file)
module_name = os.path.splitext(full_path[len(os.path.join(get_config_dir(), 'components')):])[0]
try:
SourceFileLoader(module_name, full_path).load_module()
except Exception as e:
logger = logging.getLogger(__name__)
logger.warning('Could not load the file "{}", exception: "{}".'.format(full_path, str(e)))
|
lgpl-3.0
|
vineodd/PIMSim
|
GEM5Simulation/gem5/src/arch/x86/isa/insts/x87/load_constants/load_logarithm.py
|
70
|
2566
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Nilay Vaish
microcode = '''
def macroop FLDL2E {
lfpimm ufp1, 1.44269504089
movfp st(-1), ufp1, spm=-1
};
def macroop FLDL2T {
lfpimm ufp1, 3.32192809489
movfp st(-1), ufp1, spm=-1
};
def macroop FLDLG2 {
lfpimm ufp1, 0.30102999566
movfp st(-1), ufp1, spm=-1
};
def macroop FLDLN2 {
lfpimm ufp1, 0.69314718056
movfp st(-1), ufp1, spm=-1
};
'''
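# Clarifying note (added; not in the original source): the immediates above are the
# standard x87 constants. FLDL2E loads log2(e), FLDL2T loads log2(10), FLDLG2 loads
# log10(2) and FLDLN2 loads ln(2); each macroop places the constant in a temporary
# FP register and pushes it onto the x87 stack via movfp with spm=-1.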
|
gpl-3.0
|
smallyear/linuxLearn
|
salt/salt/modules/bigip.py
|
1
|
69568
|
# -*- coding: utf-8 -*-
'''
An execution module which can manipulate an f5 bigip via iControl REST
:maturity: develop
:platform: f5_bigip_11.6
'''
# Import python libs
from __future__ import absolute_import
import json
import logging as logger
# Import third party libs
try:
import requests
import requests.exceptions
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
# Import 3rd-party libs
import salt.ext.six as six
# Import salt libs
import salt.utils
import salt.output
import salt.exceptions
# Setup the logger
log = logger.getLogger(__name__)
def __virtual__():
'''
Only return if requests is installed
'''
return 'bigip' if HAS_LIBS else False
BIG_IP_URL_BASE = 'https://{host}/mgmt/tm'
def _build_session(username, password, trans_label=None):
'''
Create a session to be used when connecting to iControl REST.
'''
bigip = requests.session()
bigip.auth = (username, password)
bigip.verify = False
bigip.headers.update({'Content-Type': 'application/json'})
if trans_label:
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=trans_label))
if trans_id:
bigip.headers.update({'X-F5-REST-Coordination-Id': trans_id})
else:
bigip.headers.update({'X-F5-REST-Coordination-Id': None})
return bigip
def _load_response(response):
'''
Load the response from json data, return the dictionary or raw text
'''
try:
data = json.loads(response.text)
except ValueError:
data = response.text
ret = {'code': response.status_code, 'content': data}
return ret
def _load_connection_error(hostname, error):
'''
Format and Return a connection error
'''
ret = {'code': None, 'content': 'Error: Unable to connect to the bigip device: {host}\n{error}'.format(host=hostname, error=error)}
return ret
def _loop_payload(params):
'''
    Pass in a dictionary of parameters, loop through them and build a payload containing
    the parameters whose values are not None.
'''
#construct the payload
payload = {}
#set the payload
for param, value in six.iteritems(params):
if value is not None:
payload[param] = value
return payload
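# Illustrative example (added; not in the original module): given
#     _loop_payload({'description': 'my node', 'ratio': None, 'logging': 'enabled'})
# the resulting payload is {'description': 'my node', 'logging': 'enabled'}; the
# None-valued 'ratio' key is dropped.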
def _build_list(option_value, item_kind):
'''
    Pass in an option to check for a list of items and create a list (or a list of
    dictionaries) of items to set for this option.
'''
#specify profiles if provided
if option_value is not None:
items = []
#if user specified none, return an empty list
if option_value == 'none':
return items
#was a list already passed in?
if not isinstance(option_value, list):
values = option_value.split(',')
else:
values = option_value
for value in values:
# sometimes the bigip just likes a plain ol list of items
if item_kind is None:
items.append(value)
# other times it's picky and likes key value pairs...
else:
items.append({'kind': item_kind, 'name': value})
return items
return None
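# Illustrative examples (added; not in the original module) of the list building above:
#     _build_list('http,tcp', 'ltm:virtual:profile')
#         -> [{'kind': 'ltm:virtual:profile', 'name': 'http'},
#             {'kind': 'ltm:virtual:profile', 'name': 'tcp'}]
#     _build_list('rule_one,rule_two', None) -> ['rule_one', 'rule_two']
#     _build_list('none', 'ltm:virtual:policy') -> []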
def _determine_toggles(payload, toggles):
'''
BigIP can't make up its mind if it likes yes / no or true or false.
Figure out what it likes to hear without confusing the user.
'''
for toggle, definition in six.iteritems(toggles):
#did the user specify anything?
if definition['value'] is not None:
#test for yes_no toggle
if (definition['value'] is True or definition['value'] == 'yes') and definition['type'] == 'yes_no':
payload[toggle] = 'yes'
elif (definition['value'] is False or definition['value'] == 'no') and definition['type'] == 'yes_no':
payload[toggle] = 'no'
#test for true_false toggle
if (definition['value'] is True or definition['value'] == 'yes') and definition['type'] == 'true_false':
payload[toggle] = True
elif (definition['value'] is False or definition['value'] == 'no') and definition['type'] == 'true_false':
payload[toggle] = False
return payload
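# Illustrative example (added; not in the original module): with
#     toggles = {'allow-nat': {'type': 'yes_no', 'value': True},
#                'dhcp-relay': {'type': 'true_false', 'value': 'no'}}
# the payload gains {'allow-nat': 'yes', 'dhcp-relay': False}; toggles whose value
# is None are left untouched.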
def _set_value(value):
'''
    A function to detect if the user is trying to pass a dictionary or a list. Parse it
    and return a dictionary, a list, or a string.
'''
logger.error(value)
#don't continue if already an acceptable data-type
if isinstance(value, bool) or isinstance(value, dict) or isinstance(value, list):
return value
#check if json
if value.startswith('j{') and value.endswith('}j'):
value = value.replace('j{', '{')
value = value.replace('}j', '}')
try:
return json.loads(value)
except Exception:
raise salt.exceptions.CommandExecutionError
#detect list of dictionaries
if '|' in value and r'\|' not in value:
values = value.split('|')
items = []
for value in values:
items.append(_set_value(value))
return items
#parse out dictionary if detected
if ':' in value and r'\:' not in value:
options = {}
#split out pairs
key_pairs = value.split(',')
for key_pair in key_pairs:
k = key_pair.split(':')[0]
v = key_pair.split(':')[1]
options[k] = v
return options
#try making a list
elif ',' in value and r'\,' not in value:
value_items = value.split(',')
return value_items
#just return a string
else:
#remove escape chars if added
if r'\|' in value:
value = value.replace(r'\|', '|')
if r'\:' in value:
value = value.replace(r'\:', ':')
if r'\,' in value:
value = value.replace(r'\,', ',')
return value
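# Illustrative examples (added; not in the original module) of the parsing rules above:
#     _set_value('j{"name": "web"}j')  -> {'name': 'web'}          (JSON form)
#     _set_value('k1:v1,k2:v2')        -> {'k1': 'v1', 'k2': 'v2'}
#     _set_value('a,b,c')              -> ['a', 'b', 'c']
#     _set_value('k1:v1|k2:v2')        -> [{'k1': 'v1'}, {'k2': 'v2'}]
#     _set_value(r'10\:80')            -> '10:80'                  (escaped colon)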
def start_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and start a new transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
The name / alias for this transaction. The actual transaction
id will be stored within a grain called ``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.start_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
payload = {}
#post to REST to get trans id
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/transaction', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
#extract the trans_id
data = _load_response(response)
if data['code'] == 200:
trans_id = data['content']['transId']
__salt__['grains.setval']('bigip_f5_trans', {label: trans_id})
return 'Transaction: {trans_id} - has successfully been stored in the grain: bigip_f5_trans:{label}'.format(trans_id=trans_id,
label=label)
else:
return data
def list_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and list an existing transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
the label of this transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.list_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
#post to REST to get trans id
try:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}/commands'.format(trans_id=trans_id))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
' bigip.start_transaction function'
def commit_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and commit an existing transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
the label of this transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.commit_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
payload = {}
payload['state'] = 'VALIDATING'
#patch to REST to get trans id
try:
response = bigip_session.patch(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}'.format(trans_id=trans_id), data=json.dumps(payload))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
' bigip.start_transaction function'
def delete_transaction(hostname, username, password, label):
'''
A function to connect to a bigip device and delete an existing transaction.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
label
The label of this transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.delete_transaction bigip admin admin my_transaction
'''
#build the session
bigip_session = _build_session(username, password)
#pull the trans id from the grain
trans_id = __salt__['grains.get']('bigip_f5_trans:{label}'.format(label=label))
if trans_id:
#patch to REST to get trans id
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/transaction/{trans_id}'.format(trans_id=trans_id))
return _load_response(response)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
else:
return 'Error: the label for this transaction was not defined as a grain. Begin a new transaction using the' \
' bigip.start_transaction function'
def list_node(hostname, username, password, name=None, trans_label=None):
'''
A function to connect to a bigip device and list all nodes or a specific node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to list. If no name is specified than all nodes
will be listed.
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.list_node bigip admin admin my-node
'''
#build sessions
bigip_session = _build_session(username, password, trans_label)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node')
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_node(hostname, username, password, name, address, trans_label=None):
'''
A function to connect to a bigip device and create a node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node
address
The address of the node
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
        salt '*' bigip.create_node bigip admin admin my-node 10.1.1.2
'''
#build session
bigip_session = _build_session(username, password, trans_label)
#construct the payload
payload = {}
payload['name'] = name
payload['address'] = address
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_node(hostname, username, password, name,
connection_limit=None,
description=None,
dynamic_ratio=None,
logging=None,
monitor=None,
rate_limit=None,
ratio=None,
session=None,
state=None,
trans_label=None):
'''
A function to connect to a bigip device and modify an existing node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node to modify
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
logging
[enabled | disabled]
monitor
[[name] | none | default]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
state
[user-down | user-up ]
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.modify_node bigip admin admin 10.1.1.2 ratio=2 logging=enabled
'''
params = {
'connection-limit': connection_limit,
'description': description,
'dynamic-ratio': dynamic_ratio,
'logging': logging,
'monitor': monitor,
'rate-limit': rate_limit,
'ratio': ratio,
'session': session,
'state': state,
}
#build session
bigip_session = _build_session(username, password, trans_label)
#build payload
payload = _loop_payload(params)
payload['name'] = name
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_node(hostname, username, password, name, trans_label=None):
'''
A function to connect to a bigip device and delete a specific node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node which will be deleted.
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.delete_node bigip admin admin my-node
'''
#build session
bigip_session = _build_session(username, password, trans_label)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/node/{name}'.format(name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
def list_pool(hostname, username, password, name=None):
'''
A function to connect to a bigip device and list all pools or a specific pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to list. If no name is specified then all pools
will be listed.
CLI Example::
salt '*' bigip.list_pool bigip admin admin my-pool
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/?expandSubcollections=true'.format(name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool')
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_pool(hostname, username, password, name, members=None,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None):
'''
A function to connect to a bigip device and create a pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to create.
members
List of comma delimited pool members to add to the pool.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[enabled | disabled]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_depth_limit
[integer]
queue_on_connection_limit
[enabled | disabled]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
CLI Example::
salt '*' bigip.create_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 monitor=http
'''
params = {
'description': description,
'gateway-failsafe-device': gateway_failsafe_device,
'ignore-persisted-weight': ignore_persisted_weight,
'ip-tos-to-client': ip_tos_to_client,
'ip-tos-to-server': ip_tos_to_server,
'link-qos-to-client': link_qos_to_client,
'link-qos-to-server': link_qos_to_server,
'load-balancing-mode': load_balancing_mode,
'min-active-members': min_active_members,
'min-up-members': min_up_members,
'min-up-members-action': min_up_members_action,
'min-up-members-checking': min_up_members_checking,
'monitor': monitor,
'profiles': profiles,
'queue-on-connection-limit': queue_on_connection_limit,
'queue-depth-limit': queue_depth_limit,
'queue-time-limit': queue_time_limit,
'reselect-tries': reselect_tries,
'service-down-action': service_down_action,
'slow-ramp-time': slow_ramp_time
}
    # some options take yes/no, others take true/false. Figure out when to use which
    # without confusing the end user
toggles = {
'allow-nat': {'type': 'yes_no', 'value': allow_nat},
'allow-snat': {'type': 'yes_no', 'value': allow_snat}
}
#build payload
payload = _loop_payload(params)
payload['name'] = name
#determine toggles
payload = _determine_toggles(payload, toggles)
#specify members if provided
if members is not None:
payload['members'] = _build_list(members, 'ltm:pool:members')
#build session
bigip_session = _build_session(username, password)
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_pool(hostname, username, password, name,
allow_nat=None,
allow_snat=None,
description=None,
gateway_failsafe_device=None,
ignore_persisted_weight=None,
ip_tos_to_client=None,
ip_tos_to_server=None,
link_qos_to_client=None,
link_qos_to_server=None,
load_balancing_mode=None,
min_active_members=None,
min_up_members=None,
min_up_members_action=None,
min_up_members_checking=None,
monitor=None,
profiles=None,
queue_depth_limit=None,
queue_on_connection_limit=None,
queue_time_limit=None,
reselect_tries=None,
service_down_action=None,
slow_ramp_time=None):
'''
A function to connect to a bigip device and modify an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify.
allow_nat
[yes | no]
allow_snat
[yes | no]
description
[string]
gateway_failsafe_device
[string]
ignore_persisted_weight
[yes | no]
ip_tos_to_client
[pass-through | [integer]]
ip_tos_to_server
[pass-through | [integer]]
link_qos_to_client
[pass-through | [integer]]
link_qos_to_server
[pass-through | [integer]]
load_balancing_mode
[dynamic-ratio-member | dynamic-ratio-node |
fastest-app-response | fastest-node |
least-connections-members |
least-connections-node |
least-sessions |
observed-member | observed-node |
predictive-member | predictive-node |
ratio-least-connections-member |
ratio-least-connections-node |
ratio-member | ratio-node | ratio-session |
round-robin | weighted-least-connections-member |
weighted-least-connections-node]
min_active_members
[integer]
min_up_members
[integer]
min_up_members_action
[failover | reboot | restart-all]
min_up_members_checking
[enabled | disabled]
monitor
[name]
profiles
[none | profile_name]
queue_on_connection_limit
[enabled | disabled]
queue_depth_limit
[integer]
queue_time_limit
[integer]
reselect_tries
[integer]
service_down_action
[drop | none | reselect | reset]
slow_ramp_time
[integer]
CLI Example::
salt '*' bigip.modify_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 min_active_members=1
'''
params = {
'description': description,
'gateway-failsafe-device': gateway_failsafe_device,
'ignore-persisted-weight': ignore_persisted_weight,
'ip-tos-to-client': ip_tos_to_client,
'ip-tos-to-server': ip_tos_to_server,
'link-qos-to-client': link_qos_to_client,
'link-qos-to-server': link_qos_to_server,
'load-balancing-mode': load_balancing_mode,
'min-active-members': min_active_members,
'min-up-members': min_up_members,
'min-up_members-action': min_up_members_action,
'min-up-members-checking': min_up_members_checking,
'monitor': monitor,
'profiles': profiles,
'queue-on-connection-limit': queue_on_connection_limit,
'queue-depth-limit': queue_depth_limit,
'queue-time-limit': queue_time_limit,
'reselect-tries': reselect_tries,
'service-down-action': service_down_action,
'slow-ramp-time': slow_ramp_time
}
    # some options take yes/no, others take true/false. Figure out when to use which
    # without confusing the end user
toggles = {
'allow-nat': {'type': 'yes_no', 'value': allow_nat},
'allow-snat': {'type': 'yes_no', 'value': allow_snat}
}
#build payload
payload = _loop_payload(params)
payload['name'] = name
#determine toggles
payload = _determine_toggles(payload, toggles)
#build session
bigip_session = _build_session(username, password)
#post to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_pool(hostname, username, password, name):
'''
A function to connect to a bigip device and delete a specific pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool which will be deleted
CLI Example::
        salt '*' bigip.delete_pool bigip admin admin my-pool
'''
#build session
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
def replace_pool_members(hostname, username, password, name, members):
'''
A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
List of comma delimited pool members to replace existing members with.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
'''
payload = {}
payload['name'] = name
#specify members if provided
if members is not None:
if isinstance(members, str):
members = members.split(',')
pool_members = []
for member in members:
#check to see if already a dictionary ( for states)
if isinstance(member, dict):
#check for state alternative name 'member_state', replace with state
if 'member_state' in member.keys():
member['state'] = member.pop('member_state')
#replace underscore with dash
for key in member.keys():
new_key = key.replace('_', '-')
member[new_key] = member.pop(key)
pool_members.append(member)
#parse string passed via execution command (for executions)
else:
pool_members.append({'name': member, 'address': member.split(':')[0]})
payload['members'] = pool_members
#build session
bigip_session = _build_session(username, password)
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def add_pool_member(hostname, username, password, name, member):
'''
A function to connect to a bigip device and add a new member to an existing pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The name of the member to add
i.e. 10.1.1.2:80
CLI Example:
.. code-block:: bash
salt '*' bigip.add_pool_members bigip admin admin my-pool 10.2.2.1:80
'''
# for states
if isinstance(member, dict):
#check for state alternative name 'member_state', replace with state
if 'member_state' in member.keys():
member['state'] = member.pop('member_state')
#replace underscore with dash
for key in member.keys():
new_key = key.replace('_', '-')
member[new_key] = member.pop(key)
payload = member
# for execution
else:
payload = {'name': member, 'address': member.split(':')[0]}
#build session
bigip_session = _build_session(username, password)
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_pool_member(hostname, username, password, name, member,
connection_limit=None,
description=None,
dynamic_ratio=None,
inherit_profile=None,
logging=None,
monitor=None,
priority_group=None,
profiles=None,
rate_limit=None,
ratio=None,
session=None,
state=None):
'''
A function to connect to a bigip device and modify an existing member of a pool.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The name of the member to modify i.e. 10.1.1.2:80
connection_limit
[integer]
description
[string]
dynamic_ratio
[integer]
inherit_profile
[enabled | disabled]
logging
[enabled | disabled]
monitor
[name]
priority_group
[integer]
profiles
[none | profile_name]
rate_limit
[integer]
ratio
[integer]
session
[user-enabled | user-disabled]
state
[ user-up | user-down ]
CLI Example::
        salt '*' bigip.modify_pool_member bigip admin admin my-pool 10.2.2.1:80 state=user-down session=user-disabled
'''
params = {
'connection-limit': connection_limit,
'description': description,
'dynamic-ratio': dynamic_ratio,
'inherit-profile': inherit_profile,
'logging': logging,
'monitor': monitor,
'priority-group': priority_group,
'profiles': profiles,
'rate-limit': rate_limit,
'ratio': ratio,
'session': session,
'state': state
}
#build session
bigip_session = _build_session(username, password)
#build payload
payload = _loop_payload(params)
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members/{member}'.format(name=name, member=member), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_pool_member(hostname, username, password, name, member):
'''
    A function to connect to a bigip device and delete a specific pool member.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
member
The name of the pool member to delete
CLI Example::
        salt '*' bigip.delete_pool_member bigip admin admin my-pool 10.2.2.2:80
'''
#build session
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/pool/{name}/members/{member}'.format(name=name, member=member))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
def list_virtual(hostname, username, password, name=None):
'''
A function to connect to a bigip device and list all virtuals or a specific virtual.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to list. If no name is specified than all
virtuals will be listed.
CLI Example::
salt '*' bigip.list_virtual bigip admin admin my-virtual
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}/?expandSubcollections=true'.format(name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual')
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_virtual(hostname, username, password, name, destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None):
r'''
A function to connect to a bigip device and create a virtual server.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
[none] | string]
cmp_enabled
[yes | no]
dhcp_relay
[yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward
(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[none | profile1,profile2,profile3 ... ]
profiles
[none | default | profile1,profile2,profile3 ... ]
policies
[none | default | policy1,policy2,policy3 ... ]
rate_class
[name]
rate_limit
[integer]
rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
    rate_limit_src
[integer]
rules
[none | [rule_one,rule_two ...] ]
related_rules
[none | [rule_one,rule_two ...] ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | class_one,class_two ... ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | [enabled|disabled]:vlan1,vlan2,vlan3 ... ]
CLI Examples::
salt '*' bigip.create_virtual bigip admin admin my-virtual-3 26.2.2.5:80 \
pool=my-http-pool-http profiles=http,tcp
salt '*' bigip.create_virtual bigip admin admin my-virtual-3 43.2.2.5:80 \
pool=test-http-pool-http profiles=http,websecurity persist=cookie,hash \
policies=asm_auto_l7_policy__http-virtual \
rules=_sys_APM_ExchangeSupport_helper,_sys_https_redirect \
related_rules=_sys_APM_activesync,_sys_APM_ExchangeSupport_helper \
source_address_translation=snat:my-snat-pool \
translate_address=enabled translate_port=enabled \
traffic_classes=my-class,other-class \
vlans=enabled:external,internal
'''
params = {
'pool': pool,
'auto-lasthop': auto_lasthop,
'bwc-policy': bwc_policy,
'connection-limit': connection_limit,
'description': description,
'fallback-persistence': fallback_persistence,
'flow-eviction-policy': flow_eviction_policy,
'gtm-score': gtm_score,
'ip-protocol': ip_protocol,
'last-hop-pool': last_hop_pool,
'mask': mask,
'mirror': mirror,
'nat64': nat64,
'persist': persist,
'rate-class': rate_class,
'rate-limit': rate_limit,
'rate-limit-mode': rate_limit_mode,
'rate-limit-dst': rate_limit_dst,
'rate-limit-src': rate_limit_src,
'source': source,
'source-port': source_port,
'translate-address': translate_address,
'translate-port': translate_port
}
    # Some options take yes/no while others take true/false. Figure out which
    # form to use without confusing the end user.
toggles = {
'address-status': {'type': 'yes_no', 'value': address_status},
'cmp-enabled': {'type': 'yes_no', 'value': cmp_enabled},
'dhcp-relay': {'type': 'true_false', 'value': dhcp_relay},
'reject': {'type': 'true_false', 'value': reject},
'12-forward': {'type': 'true_false', 'value': twelve_forward},
'internal': {'type': 'true_false', 'value': internal},
'ip-forward': {'type': 'true_false', 'value': ip_forward}
}
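    # Illustrative note (hypothetical inputs): with the mapping above,
    # address_status='yes' is expected to land in the payload as
    # 'address-status': 'yes', while dhcp_relay='yes' becomes
    # 'dhcp-relay': True, since the iControl REST API wants true/false
    # for that option.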
#build session
bigip_session = _build_session(username, password)
#build payload
payload = _loop_payload(params)
payload['name'] = name
payload['destination'] = destination
#determine toggles
payload = _determine_toggles(payload, toggles)
#specify profiles if provided
if profiles is not None:
payload['profiles'] = _build_list(profiles, 'ltm:virtual:profile')
#specify persist if provided
if persist is not None:
payload['persist'] = _build_list(persist, 'ltm:virtual:persist')
#specify policies if provided
if policies is not None:
payload['policies'] = _build_list(policies, 'ltm:virtual:policy')
#specify rules if provided
if rules is not None:
payload['rules'] = _build_list(rules, None)
#specify related-rules if provided
if related_rules is not None:
payload['related-rules'] = _build_list(related_rules, None)
#handle source-address-translation
if source_address_translation is not None:
#check to see if this is already a dictionary first
if isinstance(source_address_translation, dict):
payload['source-address-translation'] = source_address_translation
elif source_address_translation == 'none':
payload['source-address-translation'] = {'pool': 'none', 'type': 'none'}
elif source_address_translation == 'automap':
payload['source-address-translation'] = {'pool': 'none', 'type': 'automap'}
elif source_address_translation == 'lsn':
payload['source-address-translation'] = {'pool': 'none', 'type': 'lsn'}
elif source_address_translation.startswith('snat'):
snat_pool = source_address_translation.split(':')[1]
payload['source-address-translation'] = {'pool': snat_pool, 'type': 'snat'}
#specify related-rules if provided
if traffic_classes is not None:
payload['traffic-classes'] = _build_list(traffic_classes, None)
#handle vlans
if vlans is not None:
        #check to see if vlans is a dictionary (used when state makes use of function)
if isinstance(vlans, dict):
try:
payload['vlans'] = vlans['vlan_ids']
if vlans['enabled']:
payload['vlans-enabled'] = True
elif vlans['disabled']:
payload['vlans-disabled'] = True
except Exception:
return 'Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}'.format(vlans=vlans)
elif vlans == 'none':
payload['vlans'] = 'none'
elif vlans == 'default':
payload['vlans'] = 'default'
elif isinstance(vlans, str) and (vlans.startswith('enabled') or vlans.startswith('disabled')):
try:
vlans_setting = vlans.split(':')[0]
payload['vlans'] = vlans.split(':')[1].split(',')
if vlans_setting == 'disabled':
payload['vlans-disabled'] = True
elif vlans_setting == 'enabled':
payload['vlans-enabled'] = True
except Exception:
return 'Error: Unable to Parse vlans option: \n\tvlans={vlans}'.format(vlans=vlans)
else:
return 'Error: vlans must be a dictionary or string.'
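    # For example, vlans='enabled:external,internal' results in
    # payload['vlans'] = ['external', 'internal'] plus payload['vlans-enabled'] = True,
    # while a leading 'disabled:' sets payload['vlans-disabled'] = True instead.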
#determine state
if state is not None:
if state == 'enabled':
payload['enabled'] = True
elif state == 'disabled':
payload['disabled'] = True
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual', data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_virtual(hostname, username, password, name,
destination=None,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None):
'''
A function to connect to a bigip device and modify an existing virtual server.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to modify
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
cmp_enabled
[yes | no]
dhcp_relay
        [yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward
(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[none | profile1,profile2,profile3 ... ]
profiles
[none | default | profile1,profile2,profile3 ... ]
policies
[none | default | policy1,policy2,policy3 ... ]
rate_class
[name]
rate_limit
[integer]
    rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
rate_limit_src
[integer]
rules
[none | [rule_one,rule_two ...] ]
related_rules
[none | [rule_one,rule_two ...] ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap ]
source_port
[change | preserve | preserve-strict]
state
        [enabled | disabled]
traffic_classes
[none | default | class_one,class_two ... ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | [enabled|disabled]:vlan1,vlan2,vlan3 ... ]
CLI Example::
salt '*' bigip.modify_virtual bigip admin admin my-virtual source_address_translation=none
salt '*' bigip.modify_virtual bigip admin admin my-virtual rules=my-rule,my-other-rule
'''
params = {
'destination': destination,
'pool': pool,
'auto-lasthop': auto_lasthop,
'bwc-policy': bwc_policy,
'connection-limit': connection_limit,
'description': description,
'fallback-persistence': fallback_persistence,
'flow-eviction-policy': flow_eviction_policy,
'gtm-score': gtm_score,
'ip-protocol': ip_protocol,
'last-hop-pool': last_hop_pool,
'mask': mask,
'mirror': mirror,
'nat64': nat64,
'persist': persist,
'rate-class': rate_class,
'rate-limit': rate_limit,
'rate-limit-mode': rate_limit_mode,
'rate-limit-dst': rate_limit_dst,
'rate-limit-src': rate_limit_src,
'source': source,
'source-port': source_port,
'translate-address': translate_address,
'translate-port': translate_port
}
    # Some options take yes/no while others take true/false. Figure out which
    # form to use without confusing the end user.
toggles = {
'address-status': {'type': 'yes_no', 'value': address_status},
'cmp-enabled': {'type': 'yes_no', 'value': cmp_enabled},
'dhcp-relay': {'type': 'true_false', 'value': dhcp_relay},
'reject': {'type': 'true_false', 'value': reject},
'12-forward': {'type': 'true_false', 'value': twelve_forward},
'internal': {'type': 'true_false', 'value': internal},
'ip-forward': {'type': 'true_false', 'value': ip_forward}
}
#build session
bigip_session = _build_session(username, password)
#build payload
payload = _loop_payload(params)
payload['name'] = name
#determine toggles
payload = _determine_toggles(payload, toggles)
#specify profiles if provided
if profiles is not None:
payload['profiles'] = _build_list(profiles, 'ltm:virtual:profile')
#specify persist if provided
if persist is not None:
payload['persist'] = _build_list(persist, 'ltm:virtual:persist')
#specify policies if provided
if policies is not None:
payload['policies'] = _build_list(policies, 'ltm:virtual:policy')
#specify rules if provided
if rules is not None:
payload['rules'] = _build_list(rules, None)
#specify related-rules if provided
if related_rules is not None:
payload['related-rules'] = _build_list(related_rules, None)
#handle source-address-translation
if source_address_translation is not None:
if source_address_translation == 'none':
payload['source-address-translation'] = {'pool': 'none', 'type': 'none'}
elif source_address_translation == 'automap':
payload['source-address-translation'] = {'pool': 'none', 'type': 'automap'}
elif source_address_translation == 'lsn':
payload['source-address-translation'] = {'pool': 'none', 'type': 'lsn'}
elif source_address_translation.startswith('snat'):
snat_pool = source_address_translation.split(':')[1]
payload['source-address-translation'] = {'pool': snat_pool, 'type': 'snat'}
#specify related-rules if provided
if traffic_classes is not None:
payload['traffic-classes'] = _build_list(traffic_classes, None)
#handle vlans
if vlans is not None:
        #check to see if vlans is a dictionary (used when state makes use of function)
if isinstance(vlans, dict):
try:
payload['vlans'] = vlans['vlan_ids']
if vlans['enabled']:
payload['vlans-enabled'] = True
elif vlans['disabled']:
payload['vlans-disabled'] = True
except Exception:
return 'Error: Unable to Parse vlans dictionary: \n\tvlans={vlans}'.format(vlans=vlans)
elif vlans == 'none':
payload['vlans'] = 'none'
elif vlans == 'default':
payload['vlans'] = 'default'
elif vlans.startswith('enabled') or vlans.startswith('disabled'):
try:
vlans_setting = vlans.split(':')[0]
payload['vlans'] = vlans.split(':')[1].split(',')
if vlans_setting == 'disabled':
payload['vlans-disabled'] = True
elif vlans_setting == 'enabled':
payload['vlans-enabled'] = True
except Exception:
return 'Error: Unable to Parse vlans option: \n\tvlans={vlans}'.format(vlans=vlans)
#determine state
if state is not None:
if state == 'enabled':
payload['enabled'] = True
elif state == 'disabled':
payload['disabled'] = True
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}'.format(name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_virtual(hostname, username, password, name):
'''
A function to connect to a bigip device and delete a specific virtual.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to delete
CLI Example::
salt '*' bigip.delete_virtual bigip admin admin my-virtual
'''
#build session
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/virtual/{name}'.format(name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
def list_monitor(hostname, username, password, monitor_type, name=None, ):
'''
    A function to connect to a bigip device and list an existing monitor. If no name is provided then all
monitors of the specified type will be listed.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor(s) to list
name
The name of the monitor to list
CLI Example::
salt '*' bigip.list_monitor bigip admin admin http my-http-monitor
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}?expandSubcollections=true'.format(type=monitor_type, name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}'.format(type=monitor_type))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_monitor(hostname, username, password, monitor_type, name, **kwargs):
'''
A function to connect to a bigip device and create a monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to create
name
The name of the monitor to create
kwargs
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
CLI Example::
salt '*' bigip.create_monitor bigip admin admin http my-http-monitor timeout=10 interval=5
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
payload['name'] = name
#there's a ton of different monitors and a ton of options for each type of monitor.
    #this logic relies on the end user knowing which options are meant for which monitor types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'type']:
key = key.replace('_', '-')
payload[key] = value
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}'.format(type=monitor_type), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_monitor(hostname, username, password, monitor_type, name, **kwargs):
'''
A function to connect to a bigip device and modify an existing monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to modify
name
The name of the monitor to modify
kwargs
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
CLI Example::
        salt '*' bigip.modify_monitor bigip admin admin http my-http-monitor timeout=16 interval=6
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
#there's a ton of different monitors and a ton of options for each type of monitor.
    #this logic relies on the end user knowing which options are meant for which monitor types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'type', 'name']:
key = key.replace('_', '-')
payload[key] = value
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}'.format(type=monitor_type, name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_monitor(hostname, username, password, monitor_type, name):
'''
A function to connect to a bigip device and delete an existing monitor.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
monitor_type
The type of monitor to delete
name
The name of the monitor to delete
CLI Example::
salt '*' bigip.delete_monitor bigip admin admin http my-http-monitor
'''
#build sessions
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/monitor/{type}/{name}'.format(type=monitor_type, name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
def list_profile(hostname, username, password, profile_type, name=None, ):
'''
    A function to connect to a bigip device and list an existing profile. If no name is provided then all
profiles of the specified type will be listed.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile(s) to list
name
The name of the profile to list
CLI Example::
salt '*' bigip.list_profile bigip admin admin http my-http-profile
'''
#build sessions
bigip_session = _build_session(username, password)
#get to REST
try:
if name:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}?expandSubcollections=true'.format(type=profile_type, name=name))
else:
response = bigip_session.get(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}'.format(type=profile_type))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def create_profile(hostname, username, password, profile_type, name, **kwargs):
r'''
A function to connect to a bigip device and create a profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to create
name
The name of the profile to create
kwargs
``[ arg=val ] ... [arg=key1:val1,key2:val2] ...``
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
Creating Complex Args
Profiles can get pretty complicated in terms of the amount of possible
config options. Use the following shorthand to create complex arguments such
as lists, dictionaries, and lists of dictionaries. An option is also
provided to pass raw json as well.
lists ``[i,i,i]``:
``param='item1,item2,item3'``
        Dictionary ``[k:v,k:v,k:v]``:
            ``param='key-1:val-1,key-2:val-2,key-3:val-3'``
List of Dictionaries ``[k:v,k:v|k:v,k:v|k:v,k:v]``:
``param='key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2'``
JSON: ``'j{ ... }j'``:
``cert-key-chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'``
Escaping Delimiters:
Use ``\,`` or ``\:`` or ``\|`` to escape characters which shouldn't
            be treated as delimiters, e.g. ``ciphers='DEFAULT\:!SSLv3'``
CLI Examples::
salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http'
salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http' \
enforcement=maxHeaderCount:3200,maxRequests:10
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
payload['name'] = name
#there's a ton of different profiles and a ton of options for each type of profile.
    #this logic relies on the end user knowing which options are meant for which profile types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'profile_type']:
key = key.replace('_', '-')
try:
payload[key] = _set_value(value)
except salt.exceptions.CommandExecutionError:
return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=key, value=value)
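    # Illustrative parse results (assumes _set_value implements the shorthand
    # described in the docstring above):
    #   'item1,item2,item3'        -> ['item1', 'item2', 'item3']
    #   'key-1:val-1,key-2:val-2'  -> {'key-1': 'val-1', 'key-2': 'val-2'}
    #   'k:v|k:v'                  -> a list of dictionaries
    #   'j{ ... }j'                -> the decoded JSON object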
#post to REST
try:
response = bigip_session.post(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}'.format(type=profile_type), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def modify_profile(hostname, username, password, profile_type, name, **kwargs):
r'''
    A function to connect to a bigip device and modify an existing profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to create
name
The name of the profile to create
kwargs
``[ arg=val ] ... [arg=key1:val1,key2:val2] ...``
Consult F5 BIGIP user guide for specific options for each monitor type.
Typically, tmsh arg names are used.
Creating Complex Args
Profiles can get pretty complicated in terms of the amount of possible
config options. Use the following shorthand to create complex arguments such
as lists, dictionaries, and lists of dictionaries. An option is also
provided to pass raw json as well.
lists ``[i,i,i]``:
``param='item1,item2,item3'``
        Dictionary ``[k:v,k:v,k:v]``:
            ``param='key-1:val-1,key-2:val-2,key-3:val-3'``
List of Dictionaries ``[k:v,k:v|k:v,k:v|k:v,k:v]``:
``param='key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2'``
JSON: ``'j{ ... }j'``:
``cert-key-chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'``
Escaping Delimiters:
Use ``\,`` or ``\:`` or ``\|`` to escape characters which shouldn't
            be treated as delimiters, e.g. ``ciphers='DEFAULT\:!SSLv3'``
CLI Examples::
salt '*' bigip.modify_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http'
salt '*' bigip.modify_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http' \
enforcement=maxHeaderCount:3200,maxRequests:10
salt '*' bigip.modify_profile bigip admin admin client-ssl my-client-ssl-1 retainCertificate=false \
            ciphers='DEFAULT\:!SSLv3' \
cert_key_chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'
'''
#build session
bigip_session = _build_session(username, password)
#construct the payload
payload = {}
payload['name'] = name
#there's a ton of different profiles and a ton of options for each type of profile.
    #this logic relies on the end user knowing which options are meant for which profile types
for key, value in six.iteritems(kwargs):
if not key.startswith('__'):
if key not in ['hostname', 'username', 'password', 'profile_type']:
key = key.replace('_', '-')
try:
payload[key] = _set_value(value)
except salt.exceptions.CommandExecutionError:
return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=key, value=value)
#put to REST
try:
response = bigip_session.put(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}'.format(type=profile_type, name=name), data=json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response)
def delete_profile(hostname, username, password, profile_type, name):
'''
A function to connect to a bigip device and delete an existing profile.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
profile_type
The type of profile to delete
name
The name of the profile to delete
CLI Example::
salt '*' bigip.delete_profile bigip admin admin http my-http-profile
'''
#build sessions
bigip_session = _build_session(username, password)
#delete to REST
try:
response = bigip_session.delete(BIG_IP_URL_BASE.format(host=hostname)+'/ltm/profile/{type}/{name}'.format(type=profile_type, name=name))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
if _load_response(response) == '':
return True
else:
return _load_response(response)
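# Usage sketch when calling these functions directly from Python (hypothetical
# host and credentials; normally they are invoked through the salt CLI as shown
# in the docstrings above):
#   result = create_virtual('bigip.example.com', 'admin', 'admin',
#                           'my-virtual', '10.1.1.1:80', pool='my-http-pool')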
|
apache-2.0
|
hrashk/sympy
|
sympy/series/kauers.py
|
5
|
1876
|
from __future__ import print_function, division
from sympy import expand
from sympy import diff
from sympy import Sum
def finite_diff(expression, variable, increment=1):
"""
Takes as input a polynomial expression and the variable used to construct
    it and returns the difference between the function's value when the input is
    incremented by 1 and the original function value. If you want an increment
    other than one, supply it as the third argument.
Examples
=========
>>> from sympy.abc import x, y, z, k, n
>>> from sympy.series.kauers import finite_diff
>>> from sympy import Sum
>>> finite_diff(x**2, x)
2*x + 1
>>> finite_diff(y**3 + 2*y**2 + 3*y + 4, y)
3*y**2 + 7*y + 6
>>> finite_diff(x**2 + 3*x + 8, x, 2)
4*x + 10
>>> finite_diff(z**3 + 8*z, z, 3)
9*z**2 + 27*z + 51
"""
expression = expression.expand()
expression2 = expression.subs(variable, variable + increment)
expression2 = expression2.expand()
return expression2 - expression
def finite_diff_kauers(sum):
"""
Takes as input a Sum instance and returns the difference between the sum
with the upper index incremented by 1 and the original sum. For example,
if S(n) is a sum, then finite_diff_kauers will return S(n + 1) - S(n).
Examples
========
>>> from sympy.series.kauers import finite_diff_kauers
>>> from sympy import Sum
>>> from sympy.abc import x, y, m, n, k
>>> finite_diff_kauers(Sum(k, (k, 1, n)))
n + 1
>>> finite_diff_kauers(Sum(1/k, (k, 1, n)))
1/(n + 1)
>>> finite_diff_kauers(Sum((x*y**2), (x, 1, n), (y, 1, m)))
(m + 1)**2*(n + 1)
>>> finite_diff_kauers(Sum((x*y), (x, 1, m), (y, 1, n)))
(m + 1)*(n + 1)
"""
function = sum.function
for l in sum.limits:
        function = function.subs(l[0], l[-1] + 1)
return function
|
bsd-3-clause
|
adamwg/volsched
|
sched/widgets.py
|
1
|
3356
|
from django.conf import settings
from django import newforms as forms
from datetime import datetime, time, date
from time import strptime
# DATETIMEWIDGET
calbtn = u"""
<script type="text/javascript">
Calendar.setup({
inputField : "%s",
ifFormat : "%s",
button : "%s",
singleClick : true,
showsTime : %s
});
</script>"""
class DateTimeWidget(forms.TextInput):
dformat = '%Y-%m-%d %H:%M'
disabled = False
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
try:
final_attrs['value'] = value.strftime(self.dformat)
            except Exception:
final_attrs['value'] = value
if not final_attrs.has_key('id'):
final_attrs['id'] = u'%s_id' % (name)
id = final_attrs['id']
if self.disabled:
final_attrs['enabled'] = 0
else:
final_attrs['enabled'] = 1
jsdformat = self.dformat #.replace('%', '%%')
cal = calbtn % (id, jsdformat, id, 'true')
a = u'<input%s />%s' % (forms.util.flatatt(final_attrs), cal)
return a
def disable(self):
self.disabled = True
def value_from_datadict(self, data, name):
dtf = forms.fields.DEFAULT_DATETIME_INPUT_FORMATS
empty_values = forms.fields.EMPTY_VALUES
value = data.get(name, None)
if value in empty_values:
return None
if isinstance(value, datetime):
return value
if isinstance(value, date):
return datetime(value.year, value.month, value.day)
for format in dtf:
try:
return datetime(*strptime(value, format)[:6])
except ValueError:
continue
return None
class DateWidget(forms.TextInput):
dformat = '%Y-%m-%d'
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
try:
final_attrs['value'] = value.strftime(self.dformat)
            except Exception:
final_attrs['value'] = value
if not final_attrs.has_key('id'):
final_attrs['id'] = u'%s_id' % (name)
id = final_attrs['id']
jsdformat = self.dformat #.replace('%', '%%')
cal = calbtn % (id, jsdformat, id, 'false')
a = u'<input%s />%s' % (forms.util.flatatt(final_attrs), cal)
return a
def value_from_datadict(self, data, name):
dtf = forms.fields.DEFAULT_DATETIME_INPUT_FORMATS
empty_values = forms.fields.EMPTY_VALUES
value = data.get(name, None)
if value in empty_values:
return None
if isinstance(value, datetime):
return value
if isinstance(value, date):
return datetime(value.year, value.month, value.day)
for format in dtf:
try:
return datetime(*strptime(value, format)[:6])
except ValueError:
continue
return None
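# Usage sketch (hypothetical form; assumes the jscalendar JavaScript referenced
# by calbtn is loaded on the page):
#   class ShiftForm(forms.Form):
#       start = forms.DateTimeField(widget=DateTimeWidget())
#       day = forms.DateField(widget=DateWidget())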
|
mit
|
irkru/python-twitter
|
examples/shorten_url.py
|
19
|
2060
|
#!/usr/bin/env python
#
# Copyright 2007-2013 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that defines the default URL Shortener.
TinyURL is provided as the default and as an example.
"""
import urllib
# Change History
#
# 2010-05-16
# TinyURL example and the idea for this comes from a bug filed by
# acolorado with patch provided by ghills. Class implementation
# was done by bear.
#
# Issue 19 http://code.google.com/p/python-twitter/issues/detail?id=19
#
class ShortenURL(object):
"""Helper class to make URL Shortener calls if/when required"""
def __init__(self,
userid=None,
password=None):
"""Instantiate a new ShortenURL object
Args:
userid: userid for any required authorization call [optional]
password: password for any required authorization call [optional]
"""
self.userid = userid
self.password = password
def Shorten(self,
longURL):
"""Call TinyURL API and returned shortened URL result
Args:
longURL: URL string to shorten
Returns:
The shortened URL as a string
Note:
longURL is required and no checks are made to ensure completeness
"""
result = None
f = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s" % longURL)
try:
result = f.read()
finally:
f.close()
return result
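if __name__ == '__main__':
    # Minimal usage sketch: shorten one placeholder URL through the default
    # TinyURL endpoint above (requires outbound network access).
    shortener = ShortenURL()
    print(shortener.Shorten('http://www.example.com/some/very/long/path'))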
|
apache-2.0
|
arrac/eluka
|
ext/libsvm/python/svm.py
|
5
|
7768
|
#!/usr/bin/env python
from ctypes import *
from ctypes.util import find_library
import sys
# For unix the prefix 'lib' is not considered.
if find_library('svm'):
libsvm = CDLL(find_library('svm'))
elif find_library('libsvm'):
libsvm = CDLL(find_library('libsvm'))
else:
if sys.platform == 'win32':
libsvm = CDLL('../windows/libsvm.dll')
else:
libsvm = CDLL('../libsvm.so.2')
# Construct constants
SVM_TYPE = ['C_SVC', 'NU_SVC', 'ONE_CLASS', 'EPSILON_SVR', 'NU_SVR' ]
KERNEL_TYPE = ['LINEAR', 'POLY', 'RBF', 'SIGMOID', 'PRECOMPUTED']
for i, s in enumerate(SVM_TYPE): exec("%s = %d" % (s , i))
for i, s in enumerate(KERNEL_TYPE): exec("%s = %d" % (s , i))
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
return
def genFields(names, types):
return list(zip(names, types))
def fillprototype(f, restype, argtypes):
f.restype = restype
f.argtypes = argtypes
class svm_node(Structure):
_names = ["index", "value"]
_types = [c_int, c_double]
_fields_ = genFields(_names, _types)
def gen_svm_nodearray(xi, feature_max=None, issparse=None):
if isinstance(xi, dict):
index_range = xi.keys()
elif isinstance(xi, (list, tuple)):
index_range = range(len(xi))
else:
raise TypeError('xi should be a dictionary, list or tuple')
if feature_max:
assert(isinstance(feature_max, int))
index_range = filter(lambda j: j <= feature_max, index_range)
if issparse:
index_range = filter(lambda j:xi[j] != 0, index_range)
index_range = sorted(index_range)
ret = (svm_node * (len(index_range)+1))()
ret[-1].index = -1
for idx, j in enumerate(index_range):
ret[idx].index = j
ret[idx].value = xi[j]
max_idx = 0
if index_range:
max_idx = index_range[-1]
return ret, max_idx
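# Example: gen_svm_nodearray({1: 0.5, 3: 1.0}) yields the ctypes nodes
# (index=1, value=0.5), (index=3, value=1.0) plus the terminating index=-1
# sentinel, together with max_idx == 3.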
class svm_problem(Structure):
_names = ["l", "y", "x"]
_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
_fields_ = genFields(_names, _types)
def __init__(self, y, x):
if len(y) != len(x):
raise ValueError("len(y) != len(x)")
self.l = l = len(y)
max_idx = 0
x_space = self.x_space = []
for i, xi in enumerate(x):
tmp_xi, tmp_idx = gen_svm_nodearray(xi)
x_space += [tmp_xi]
max_idx = max(max_idx, tmp_idx)
self.n = max_idx
self.y = (c_double * l)()
for i, yi in enumerate(y): self.y[i] = yi
self.x = (POINTER(svm_node) * l)()
for i, xi in enumerate(self.x_space): self.x[i] = xi
class svm_parameter(Structure):
_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
"nu", "p", "shrinking", "probability"]
_types = [c_int, c_int, c_int, c_double, c_double,
c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
c_double, c_double, c_int, c_int]
_fields_ = genFields(_names, _types)
def __init__(self, options = None):
if options == None:
options = ''
self.parse_options(options)
def show(self):
attrs = svm_parameter._names + self.__dict__.keys()
values = map(lambda attr: getattr(self, attr), attrs)
for attr, val in zip(attrs, values):
print(' %s: %s' % (attr, val))
def set_to_default_values(self):
self.svm_type = C_SVC;
self.kernel_type = RBF
self.degree = 3
self.gamma = 0
self.coef0 = 0
self.nu = 0.5
self.cache_size = 100
self.C = 1
self.eps = 0.001
self.p = 0.1
self.shrinking = 1
self.probability = 0
self.nr_weight = 0
self.weight_label = (c_int*0)()
self.weight = (c_double*0)()
self.cross_validation = False
self.nr_fold = 0
self.print_func = None
def parse_options(self, options):
argv = options.split()
self.set_to_default_values()
self.print_func = cast(None, PRINT_STRING_FUN)
weight_label = []
weight = []
i = 0
while i < len(argv):
if argv[i] == "-s":
i = i + 1
self.svm_type = int(argv[i])
elif argv[i] == "-t":
i = i + 1
self.kernel_type = int(argv[i])
elif argv[i] == "-d":
i = i + 1
self.degree = int(argv[i])
elif argv[i] == "-g":
i = i + 1
self.gamma = float(argv[i])
elif argv[i] == "-r":
i = i + 1
self.coef0 = float(argv[i])
elif argv[i] == "-n":
i = i + 1
self.nu = float(argv[i])
elif argv[i] == "-m":
i = i + 1
self.cache_size = float(argv[i])
elif argv[i] == "-c":
i = i + 1
self.C = float(argv[i])
elif argv[i] == "-e":
i = i + 1
self.eps = float(argv[i])
elif argv[i] == "-p":
i = i + 1
self.p = float(argv[i])
elif argv[i] == "-h":
i = i + 1
self.shrinking = int(argv[i])
elif argv[i] == "-b":
i = i + 1
self.probability = int(argv[i])
elif argv[i] == "-q":
self.print_func = PRINT_STRING_FUN(print_null)
elif argv[i] == "-v":
i = i + 1
self.cross_validation = 1
self.nr_fold = int(argv[i])
if self.nr_fold < 2:
raise ValueError("n-fold cross validation: n must >= 2")
elif argv[i].startswith("-w"):
i = i + 1
self.nr_weight += 1
nr_weight = self.nr_weight
weight_label += [int(argv[i-1][2:])]
weight += [float(argv[i])]
else:
raise ValueError("Wrong options")
i += 1
libsvm.svm_set_print_string_function(self.print_func)
self.weight_label = (c_int*self.nr_weight)()
self.weight = (c_double*self.nr_weight)()
for i in range(self.nr_weight):
self.weight[i] = weight[i]
self.weight_label[i] = weight_label[i]
class svm_model(Structure):
def __init__(self):
self.__createfrom__ = 'python'
def __del__(self):
# free memory created by C to avoid memory leak
if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
libsvm.svm_free_and_destroy_model(pointer(self))
def get_svm_type(self):
return libsvm.svm_get_svm_type(self)
def get_nr_class(self):
return libsvm.svm_get_nr_class(self)
def get_svr_probability(self):
return libsvm.svm_get_svr_probability(self)
def get_labels(self):
nr_class = self.get_nr_class()
labels = (c_int * nr_class)()
libsvm.svm_get_labels(self, labels)
return labels[:nr_class]
def is_probability_model(self):
return (libsvm.svm_check_probability_model(self) == 1)
def toPyModel(model_ptr):
"""
toPyModel(model_ptr) -> svm_model
Convert a ctypes POINTER(svm_model) to a Python svm_model
"""
if bool(model_ptr) == False:
raise ValueError("Null pointer")
m = model_ptr.contents
m.__createfrom__ = 'C'
return m
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN])
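# Minimal end-to-end sketch using the prototypes above (illustrative values;
# assumes the shared library was located at import time):
#   prob = svm_problem([1, -1], [{1: 1.0, 2: 0.0}, {1: -1.0, 2: -1.0}])
#   param = svm_parameter('-t 0 -c 1')
#   model = toPyModel(libsvm.svm_train(prob, param))
#   x0, _ = gen_svm_nodearray({1: 1.0, 2: 0.0})
#   label = libsvm.svm_predict(model, x0)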
|
mit
|
kclauw/Dueling_Network_Architectures
|
dqn/environment.py
|
1
|
2430
|
import gym
import random
import numpy as np
from .utils import rgb2gray, imresize
class Environment(object):
def __init__(self, config):
self.env = gym.make(config.env_name)
screen_width, screen_height, self.action_repeat, self.random_start = \
config.screen_width, config.screen_height, config.action_repeat, config.random_start
self.display = config.display
self.dims = (screen_width, screen_height)
self._screen = None
self.reward = 0
self.terminal = True
def new_game(self, from_random_game=False):
if self.lives == 0:
self._screen = self.env.reset()
self._step(0)
self.render()
return self.screen, 0, 0, self.terminal
def new_random_game(self):
self.new_game(True)
for _ in xrange(random.randint(0, self.random_start - 1)):
self._step(0)
self.render()
return self.screen, 0, 0, self.terminal
def _step(self, action):
self._screen, self.reward, self.terminal, _ = self.env.step(action)
def _random_step(self):
action = self.env.action_space.sample()
self._step(action)
  @property
def screen(self):
return imresize(rgb2gray(self._screen)/255., self.dims)
#return cv2.resize(cv2.cvtColor(self._screen, cv2.COLOR_BGR2YCR_CB)/255., self.dims)[:,:,0]
@property
def action_size(self):
return self.env.action_space.n
@property
def lives(self):
return self.env.ale.lives()
@property
def state(self):
return self.screen, self.reward, self.terminal
def render(self):
if self.display:
self.env.render()
def after_act(self, action):
self.render()
class GymEnvironment(Environment):
def __init__(self, config):
super(GymEnvironment, self).__init__(config)
def act(self, action, is_training=True):
cumulated_reward = 0
start_lives = self.lives
for _ in xrange(self.action_repeat):
self._step(action)
cumulated_reward = cumulated_reward + self.reward
if is_training and start_lives > self.lives:
cumulated_reward -= 1
self.terminal = True
if self.terminal:
break
self.reward = cumulated_reward
self.after_act(action)
return self.state
class SimpleGymEnvironment(Environment):
def __init__(self, config):
super(SimpleGymEnvironment, self).__init__(config)
def act(self, action, is_training=True):
self._step(action)
self.after_act(action)
return self.state
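# Usage sketch (hypothetical config values; the real config object comes from
# the training script and is not defined in this file):
#   class _Config(object):
#       env_name, screen_width, screen_height = 'Breakout-v0', 84, 84
#       action_repeat, random_start, display = 4, 30, False
#   env = GymEnvironment(_Config())
#   screen, reward, action, terminal = env.new_random_game()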
|
mit
|
mmbtba/odoo
|
addons/lunch/wizard/lunch_validation.py
|
440
|
1296
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_validation(osv.Model):
""" lunch validation """
_name = 'lunch.validation'
_description = 'lunch validation for order'
def confirm(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').confirm(cr, uid, ids, context=context)
|
agpl-3.0
|
minhphung171093/GreenERP_V7
|
openerp/addons/portal_project_issue/tests/__init__.py
|
167
|
1124
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
clinton-hall/nzbToMedia
|
libs/common/bs4/diagnose.py
|
2
|
7010
|
"""Diagnostic functions, mainly for use when doing tech support."""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__license__ = "MIT"
import cProfile
from io import StringIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
import cProfile
def diagnose(data):
"""Diagnostic suite for isolating common problems."""
print("Diagnostic running on Beautiful Soup %s" % __version__)
print("Python version %s" % sys.version)
basic_parsers = ["html.parser", "html5lib", "lxml"]
for name in basic_parsers:
for builder in builder_registry.builders:
if name in builder.features:
break
else:
basic_parsers.remove(name)
print((
"I noticed that %s is not installed. Installing it may help." %
name))
if 'lxml' in basic_parsers:
basic_parsers.append("lxml-xml")
try:
from lxml import etree
print("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)))
except ImportError as e:
print (
"lxml is not installed or couldn't be imported.")
if 'html5lib' in basic_parsers:
try:
import html5lib
print("Found html5lib version %s" % html5lib.__version__)
except ImportError as e:
print (
"html5lib is not installed or couldn't be imported.")
if hasattr(data, 'read'):
data = data.read()
elif data.startswith("http:") or data.startswith("https:"):
print('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data)
print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.")
return
else:
try:
if os.path.exists(data):
print('"%s" looks like a filename. Reading data from the file.' % data)
with open(data) as fp:
data = fp.read()
except ValueError:
# This can happen on some platforms when the 'filename' is
# too long. Assume it's data and not a filename.
pass
print()
for parser in basic_parsers:
print("Trying to parse your markup with %s" % parser)
success = False
try:
soup = BeautifulSoup(data, features=parser)
success = True
except Exception as e:
print("%s could not parse the markup." % parser)
traceback.print_exc()
if success:
print("Here's what %s did with the markup:" % parser)
print(soup.prettify())
print("-" * 80)
def lxml_trace(data, html=True, **kwargs):
"""Print out the lxml events that occur during parsing.
This lets you see how lxml parses a document when no Beautiful
Soup code is running.
"""
from lxml import etree
for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
print(("%s, %4s, %s" % (event, element.tag, element.text)))
class AnnouncingParser(HTMLParser):
"""Announces HTMLParser parse events, without doing anything else."""
def _p(self, s):
print(s)
def handle_starttag(self, name, attrs):
self._p("%s START" % name)
def handle_endtag(self, name):
self._p("%s END" % name)
def handle_data(self, data):
self._p("%s DATA" % data)
def handle_charref(self, name):
self._p("%s CHARREF" % name)
def handle_entityref(self, name):
self._p("%s ENTITYREF" % name)
def handle_comment(self, data):
self._p("%s COMMENT" % data)
def handle_decl(self, data):
self._p("%s DECL" % data)
def unknown_decl(self, data):
self._p("%s UNKNOWN-DECL" % data)
def handle_pi(self, data):
self._p("%s PI" % data)
def htmlparser_trace(data):
"""Print out the HTMLParser events that occur during parsing.
This lets you see how HTMLParser parses a document when no
Beautiful Soup code is running.
"""
parser = AnnouncingParser()
parser.feed(data)
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
"Generate a random word-like string."
s = ''
for i in range(length):
if i % 2 == 0:
t = _consonants
else:
t = _vowels
s += random.choice(t)
return s
def rsentence(length=4):
"Generate a random sentence-like string."
return " ".join(rword(random.randint(4,9)) for i in range(length))
def rdoc(num_elements=1000):
"""Randomly generate an invalid HTML document."""
tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
elements = []
for i in range(num_elements):
choice = random.randint(0,3)
if choice == 0:
# New tag.
tag_name = random.choice(tag_names)
elements.append("<%s>" % tag_name)
elif choice == 1:
elements.append(rsentence(random.randint(1,4)))
elif choice == 2:
# Close a tag.
tag_name = random.choice(tag_names)
elements.append("</%s>" % tag_name)
return "<html>" + "\n".join(elements) + "</html>"
def benchmark_parsers(num_elements=100000):
"""Very basic head-to-head performance benchmark."""
print("Comparative parser benchmark on Beautiful Soup %s" % __version__)
data = rdoc(num_elements)
print("Generated a large invalid HTML document (%d bytes)." % len(data))
for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
success = False
try:
a = time.time()
soup = BeautifulSoup(data, parser)
b = time.time()
success = True
except Exception as e:
print("%s could not parse the markup." % parser)
traceback.print_exc()
if success:
print("BS4+%s parsed the markup in %.2fs." % (parser, b-a))
from lxml import etree
a = time.time()
etree.HTML(data)
b = time.time()
print("Raw lxml parsed the markup in %.2fs." % (b-a))
import html5lib
parser = html5lib.HTMLParser()
a = time.time()
parser.parse(data)
b = time.time()
print("Raw html5lib parsed the markup in %.2fs." % (b-a))
def profile(num_elements=100000, parser="lxml"):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
data = rdoc(num_elements)
vars = dict(bs4=bs4, data=data, parser=parser)
cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)
stats = pstats.Stats(filename)
# stats.strip_dirs()
stats.sort_stats("cumulative")
stats.print_stats('_html5lib|bs4', 50)
if __name__ == '__main__':
diagnose(sys.stdin.read())
|
gpl-3.0
|
openai/baselines
|
baselines/common/mpi_running_mean_std.py
|
1
|
3706
|
try:
from mpi4py import MPI
except ImportError:
MPI = None
import tensorflow as tf, baselines.common.tf_util as U, numpy as np
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-2, shape=()):
self._sum = tf.get_variable(
dtype=tf.float64,
shape=shape,
initializer=tf.constant_initializer(0.0),
name="runningsum", trainable=False)
self._sumsq = tf.get_variable(
dtype=tf.float64,
shape=shape,
initializer=tf.constant_initializer(epsilon),
name="runningsumsq", trainable=False)
self._count = tf.get_variable(
dtype=tf.float64,
shape=(),
initializer=tf.constant_initializer(epsilon),
name="count", trainable=False)
self.shape = shape
self.mean = tf.to_float(self._sum / self._count)
self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 ))
newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
updates=[tf.assign_add(self._sum, newsum),
tf.assign_add(self._sumsq, newsumsq),
tf.assign_add(self._count, newcount)])
def update(self, x):
x = x.astype('float64')
n = int(np.prod(self.shape))
totalvec = np.zeros(n*2+1, 'float64')
addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')])
if MPI is not None:
MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n])
@U.in_session
def test_runningmeanstd():
for (x1, x2, x3) in [
(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
(np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
]:
rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
U.initialize()
x = np.concatenate([x1, x2, x3], axis=0)
ms1 = [x.mean(axis=0), x.std(axis=0)]
rms.update(x1)
rms.update(x2)
rms.update(x3)
ms2 = [rms.mean.eval(), rms.std.eval()]
assert np.allclose(ms1, ms2)
@U.in_session
def test_dist():
np.random.seed(0)
p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1))
q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1))
# p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5))
# q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8))
comm = MPI.COMM_WORLD
assert comm.Get_size()==2
if comm.Get_rank()==0:
x1,x2,x3 = p1,p2,p3
elif comm.Get_rank()==1:
x1,x2,x3 = q1,q2,q3
else:
assert False
rms = RunningMeanStd(epsilon=0.0, shape=(1,))
U.initialize()
rms.update(x1)
rms.update(x2)
rms.update(x3)
bigvec = np.concatenate([p1,p2,p3,q1,q2,q3])
def checkallclose(x,y):
print(x,y)
return np.allclose(x,y)
assert checkallclose(
bigvec.mean(axis=0),
rms.mean.eval(),
)
assert checkallclose(
bigvec.std(axis=0),
rms.std.eval(),
)
if __name__ == "__main__":
# Run with mpirun -np 2 python <filename>
test_dist()
|
mit
|
andy-c-jones/event-app-frontend
|
node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/tools/graphviz.py
|
2679
|
2878
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
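# Example: ParseTarget('foo/bar.gyp:mylib#host') -> ('foo/bar.gyp', 'mylib', 'host');
# without a '#' suffix the third element is ''.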
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
mit
|
hlin117/statsmodels
|
statsmodels/tsa/tests/results/savedrvs.py
|
39
|
22934
|
'''Generated Random Processes for tests
autogenerated by savervs.py
'''
import numpy as np
from numpy import array
class Holder(object):
pass
rvsdata = Holder()
rvsdata.comment = 'generated data, divide by 1000, see savervs'
rvsdata.xarma32 = array([-1271, -1222, -840, -169, -1016, -980, -1272, -926, 445, 833, -91, -1974, -2231,
-549, 424, 238, -1665, -1815, 685, 3361, 1912, -1931, -3555, -1817, 387, 730,
-1154, -702, 973, 1340, -161, 276, 200, 1785, 834, -1469, -1593, -134, 555,
-422, -2314, -1326, -2268, -3579, -3049, -930, 1155, 962, -644, -217, -561, 224,
810, 2445, 2710, 2152, 502, 21, 164, -499, -1093, -492, 531, -605, -1535,
-2081, -3816, -2257, 487, 2134, 1785, 1495, 1259, 1895, 1339, 617, 1143, 385,
-1220, -738, 1171, 1047, -234, -107, -1458, -1244, -2737, 33, 2373, 2749, 2725,
3331, 1054, 418, 1231, -1171, -1446, -1187, 863, 1386, 757, 734, 283, -735,
550, 417, -236, 324, 318, -102, 2126, 3246, 2358, 2156, 726, -983, -803,
-242, -500, -13, 49, 308, -227, 243, -612, -2329, -2476, -3441, -5435, -4693,
-2538, -2159, -2656, -906, -211, -288, 1777, 1363, 564, -2035, -1134, -609, -1112,
560, 658, 1533, 796, 523, 456, 76, -1164, -749, -1084, -3218, -2107, -310,
-686, -1625, 2008, 4155, 1650, -1086, -673, 1634, 1999, 449, -1077, -648, -155,
-327, 228, 1295, 2036, 542, -197, -451, -1554, -2416, -2066, -2146, -1524, -1976,
-2962, -2621, -2313, -2052, -3314, -2363, -1522, -3305, -3445, -3206, -1501, 2029, 1963,
1168, 2050, 2927, 2019, 84, 213, 1783, 617, -767, -425, 739, 281, 506,
-749, -938, -284, -147, 51, 1296, 3033, 2263, 1409, -1702, -819, -1295, -1831,
-539, 1327, 1954, 1473, -1535, -1187, -1310, 380, 1621, 2035, 2234, 559, 51,
-1071, 590, 2128, 1483, 848, 1198, 2707, 1447, -629, 237, 909, 453, -734,
-802, 1026, 521, -9, 919, 441, -118, -1073, -2428, 98, 823, 102, -438,
-233, -613, 440, 1143, -743, -1345, 186, -1999, -2351, -887, -584, -883, -623,
-1522, 974, 2318, 1329, -523, -2599, -1555, 826, -859, -2790, -2753, 807, 1889,
-95, -1454, 443, 845, -291, 1516, 2804, 1018, 402, -446, -1721, -1824, 1678,
2889, -663, -560, 628, 1213, 520, -1344, -3029, -3100, -1603, -1480, -1667, -3356,
-4405, -2556, 532, 1602, -15, 646, 2279, 1893, -945, -258, 344, -316, 1130,
1119, 695, 276, 56, -682, -610, 412, 1058, 259, 746, 1197, 1959, 1896,
127, -1301, 1036, 3094, 5213, 3846, 1728, 40, -520, -173, 330, -480, 649,
1621, 1622, -1011, -1851, -2687, -756, 401, 1888, 2372, 4153, 2531, -150, 485,
2600, 2193, -1238, -2702, -184, 1336, 370, -1196, -1737, 637, 634, 77, -1314,
-688, -1375, -1973, -1229, -1414, -2230, -1922, -584, 93, 180, 2158, 2976, 1433,
-173, -1073, -1362, -446, 242, 7, 354, 332, 2003, 1866, -729, -1446, -294,
2438, 3955, 1829, 485, 1028, 981, 1335, 513, -1386, -2583, -1063, 465, 1104,
85, -892, -78, 766, 1995, 891, -170, 2, -428, -562, -1078, -2591, -2077,
-135, -238, -1150, -1207, -185, -46, -1319, -1829, -1409, -926, 576, 1119, 454,
-747, -538, -739, -2994, -3052, -1626, -2472, -1340, -254, -972, -1182, -258, 831,
876, -244, -724, -208, -428, -110, 188, -2187, -2695, -1161, 597, 1492, 1594,
-403, 695, 1834, 1737, 586, -740, 259, -714, -1607, -1082, -365, 2040, 604,
-1253, -1269, -419, -713, -482, 1379, 2335, 1730, 325, -1377, -1721, -1762, -602,
-1224, -839, 70, -1058, -118, -691, -1397, -245, -291, -648, -1489, -1088, -1083,
-160, 1310, 169, -1539, -1558, -2095, -3421, -1609, -465, -867, 311, 272, -157,
-936, -1003, -492, -1526, -2179, -1237, -662, -144, 638, 596, -629, -1893, -671,
324, 408, 367, 1438, 4568, 2576, 677, 701, 2667, 1288, 449, -357, 776,
2250, 2324, 968, 245, 1432, 1597, 843, 88, -274, -256, 830, 348, 534,
140, -560, -1582, -2012, -287, 1470, -729, -2398, -1433, -1409, -1547, 70, 1438,
2246, 408, -293, -566, 374, 1793, 2355, 1104, 358, 2301, 2994, 572, 278,
508, -2406, -2767, -1216, -231, -1717, -1038, 2015, 1469, 1471, 1395, 860, 1148,
1211, 1189, 494, -536, 383, -136, -2171, -2334, -1181, -294, -841, -2051, -3304,
-2254, -926, -811, 160, 1960, 2945, 2466, 1922, 2833, 2421, 1197, 3025, 4033,
3210, 1497, 1912, 1138, 174, -630, -2423, -999, 296, 1519, 2061, 1400, -424,
-609, -978, -1747, -1637, -2454, -1547, 885, 2065, 1530, -1956, 846, 2811, 3105,
2220, 2732, 4631, 3504, 1996, 246, -419, -1541, -1955, -3171, -2742, -811, -318,
-1303, -2002, -997, -487, -2089, -3453, -3373, -1940, -620, 384, 365, -133, -1300,
-833, -1544, -1711, -1981, -315, -155, -1995, -2384, -4010, -5394, -6186, -3794, -1829,
-2637, -4255, -2014, 282, -174, -2623, -2023, -749, -168, -2387, -3959, -4101, -2004,
-2070, -2468, -1831, -1518, 606, 305, 684, 2183, 1218, -1008, -2261, -1276, -99,
889, 740, -525, -1786, -1716, -452, -872, -1384, -1867, -547, -900, -1464, -1898,
-1493, -990, 965, 810, 636, -335, -57, 1761, 2837, 773, 215, 920, 483,
-234, 1301, 2610, 3083, 2329, 920, -827, 22, 4317, 5366, 3711, 2220, 1356,
198, -1385, 656, 1163, -370, -1721, -1005, -832, -1455, -1485, 221, -1445, -1502,
-79, -4, -599, -850, -507, 902, 1909, 1642, -326, -3379, -5642, -7068, -4275,
-1044, 528, 548, 249, -1384, -2485, -1533, -1776, -2930, -2058, -1721, -475, -166,
-1761, -2550, -1586, -240, -1584, -1954, 623, 3826, 2094, -1004, -1782, -267, 2490,
3336, 2293, 189, -108, -315, -965, -125, 1201, 360, -544, -1602, -2150, -901,
1430, 968, -1100, 505, 2880, 2554, 928, 918, 689, -2829, -2478, -2904, -1615,
-242, 243, -1668, -877, 2385, 543, -2462, -1762, 470, 1344, 1493, 1624, 257,
-1833, -1947, -805, -413, 905, 2909, 3272, 1148, -1473, -2368, -1054, -2143, -4330,
-3257, -1939, -1831, -414, -1157, -1212, -1644, -1360, -2409, -4136, -5747, -3415, -1752,
373, -1680, -1267, 2267, 2701, 1101, -1714, -3138, -3153, -3256, -3328, -2661, -879,
2115, 1795, -324, -1930, -1432, -1613, -2301, -1401, 88, 1369, 1063, -854, -2125,
243, 1683, 2011, 2646, 1289, -938, -1205, -1214, 562, 2641, 3335, 2858, 2650,
1965, 478, 1391, 486, 255, -1764, -813, 84, -453, -809, -1203, -1590, 730,
2059, 234, -319, 0, 624, 1273, 1470, 1882, 2215, 1611, 485, -16, 397,
593, -95, 125, 1435, 2673, 3073, 2262, 1803, 983, 666, 1516, 2821, 2395,
299, 86, 1150, 1214, 751, -1096, -962, -39, -366, -2125, -2086, -1032, -966,
-863, -1522, -1793, 1228, 207, -2243, -1916, -1320, -1530, -2318, -1050, -663, -1137,
-2035, -1198, -246, 753, -185, -709, -231, -1111, -1121, -11, 976, 555, -1947,
-1304, 807, 529, 231, -285, -553, -695, -2006, -1090, -424, 318, -1113])
rvsdata.name = 'rvsdata'
rvsdata.xnormal = array([-1271, 176, -296, 327, -973, 228, -819, 107, 975, 82, -477, -1492, -403,
695, 212, 91, -1549, -45, 1557, 1947, -785, -2139, -1264, 295, 806, 278,
-1244, 787, 752, 173, -738, 969, -646, 1811, -990, -1369, 72, 408, 169,
-587, -1517, 720, -2150, -1233, -121, 682, 1268, -29, -802, 679, -1041, 934,
344, 1788, 486, 460, -834, 40, -93, -751, -374, 345, 500, -1167, -387,
-966, -2369, 1119, 1148, 1193, 411, 626, 45, 960, -293, 11, 806, -866,
-1043, 574, 1072, -328, -381, 433, -1857, 409, -2190, 2614, 1005, 864, 1243,
1268, -1701, 680, 560, -2567, 639, -663, 1513, 215, 69, 498, -504, -771,
1392, -739, -131, 744, -382, -158, 2394, 712, 162, 1064, -1146, -1062, 248,
-171, -411, 665, -236, 350, -503, 645, -1105, -1447, -337, -2050, -2539, -151,
85, -840, -555, 1235, -427, 106, 2227, -828, 252, -2248, 992, -737, -559,
1801, -593, 1438, -583, 342, -10, -331, -1053, 466, -1020, -2163, 1003, 224,
-794, -406, 3338, 1021, -1157, -788, 242, 1219, 267, -455, -843, 183, -196,
-181, 699, 822, 825, -920, 110, -482, -1332, -843, -256, -989, 232, -1082,
-1221, -200, -800, -344, -1779, 559, -485, -2241, -84, -1173, 701, 2685, -395,
644, 1374, 741, -144, -788, 542, 1044, -1150, -296, 281, 485, -495, 829,
-1385, 80, 192, -237, 340, 1213, 1634, -264, 479, -2698, 1282, -1759, -422,
1003, 1015, 668, 332, -2367, 835, -1347, 1532, 828, 766, 907, -1122, 265,
-1357, 1658, 879, -199, 433, 532, 1482, -925, -812, 1081, -191, -126, -637,
-45, 1306, -863, 326, 954, -806, 42, -885, -1504, 2167, -496, -63, -19,
-61, -654, 1153, 340, -1586, 97, 836, -2868, 439, 380, -652, -34, 197,
-1342, 2507, 481, -228, -748, -1941, 596, 1137, -1978, -857, -546, 2208, 151,
-864, -446, 1297, -507, -417, 2265, 596, -1011, 719, -1112, -1279, -184, 2721,
371, -2244, 1511, -127, 365, -53, -1399, -1628, -736, 312, -785, -182, -2070,
-1452, 640, 1479, 648, -754, 1396, 1005, -183, -1661, 1296, -547, -532, 1901,
-511, 232, -27, -191, -734, 140, 647, 406, -449, 997, 204, 1035, 352,
-1083, -788, 2025, 1127, 2903, -184, -197, -888, -536, 82, 279, -775, 1426,
490, 362, -1900, -219, -1753, 1342, 166, 1677, 753, 2518, -1078, -990, 1138,
1235, -197, -2026, -747, 1329, 255, -323, -722, -716, 1677, -746, 298, -1190,
509, -1420, -498, 302, -996, -883, -12, 443, 139, 260, 2131, 620, -535,
-443, -870, -671, 535, 213, -172, 612, -169, 1841, -195, -1629, -3, 265,
2091, 1611, -929, 225, 486, -338, 922, -582, -1433, -1072, 765, 424, 696,
-541, -524, 612, 278, 1405, -777, -163, 188, -805, 36, -692, -1680, 268,
810, -688, -359, -120, 386, -248, -1015, -387, -273, -158, 1263, 271, -209,
-716, 208, -738, -2268, -37, 5, -1793, 1277, 46, -967, 151, 427, 579,
178, -621, -189, 167, -563, 481, 93, -2388, -120, 359, 751, 946, 613,
-1484, 1690, 387, 285, -258, -870, 936, -1574, -400, 204, -70, 2254, -1548,
-593, -89, -66, -599, 452, 1518, 658, 195, -496, -1363, -468, -759, 681,
-1159, 579, 368, -1393, 1360, -1277, -474, 959, -779, -77, -864, 141, -632,
770, 1119, -1125, -857, -153, -1451, -1597, 1318, -337, -436, 1453, -619, -64,
-625, -214, 78, -1334, -519, 313, -293, 446, 719, -93, -860, -1006, 823,
57, 199, 332, 1112, 3079, -1616, 338, 298, 1515, -1213, 603, -811, 1023,
1171, 464, -372, -24, 1105, -43, -1, -208, -340, -102, 970, -632, 743,
-459, -520, -977, -671, 1096, 974, -1956, -601, 251, -1197, -108, 1305, 661,
1135, -1164, 195, -628, 708, 1211, 773, -470, 102, 1923, 405, -1286, 932,
-349, -2927, 277, 144, -19, -1333, 988, 2027, -952, 1495, 91, -288, 784,
127, 341, -324, -670, 967, -1015, -1599, -98, -9, 157, -541, -1040, -1576,
297, 67, -285, 1094, 1433, 1051, 440, 491, 1410, -145, -138, 2498, 764,
408, -237, 1099, -888, -184, -541, -1975, 1272, 87, 1229, 908, -97, -1121,
168, -949, -891, -88, -1521, 596, 1512, 747, 259, -2640, 3297, 236, 1135,
500, 1288, 2137, -399, 380, -1022, -439, -1345, -514, -1828, -69, 770, -307,
-693, -606, 370, -290, -1584, -1193, -834, 148, 404, 771, 18, -207, -1068,
393, -1392, -163, -807, 1152, -564, -1495, -210, -2692, -1930, -2043, 660, -58,
-1329, -1511, 1339, 458, -536, -1669, 511, -210, 167, -2028, -1333, -1271, 661,
-1274, -334, 64, -619, 1818, -811, 1078, 1446, -927, -1106, -984, 181, 235,
902, 44, -836, -1021, -298, 479, -916, -219, -811, 804, -1060, -300, -710,
-163, -115, 1647, -480, 582, -807, 412, 1486, 972, -1188, 649, 326, -605,
-109, 1607, 847, 1140, 266, -492, -1229, 912, 3582, 914, 580, 283, -375,
-834, -1206, 1985, -468, -814, -702, 211, -700, -608, -113, 1086, -2231, 662,
581, -565, -131, -197, -39, 1113, 863, 236, -1199, -2557, -2587, -2859, 948,
822, 911, 448, 71, -1579, -959, 292, -1273, -1206, 474, -908, 929, -126,
-1497, -608, 106, 371, -1552, 61, 1758, 2286, -1107, -1079, -534, 404, 2099,
1035, 219, -997, 143, -666, -652, 850, 788, -780, -118, -1156, -878, 675,
1530, -576, -1022, 1862, 1282, 75, -105, 604, -507, -3169, 907, -2181, 792,
540, 180, -1660, 1113, 2179, -2179, -1223, 567, 560, 544, 906, 580, -1054,
-1474, -186, 110, -189, 1452, 1823, 684, -856, -1508, -974, 353, -1953, -1918,
418, -598, -513, 1332, -1457, 226, -905, -78, -1575, -1908, -2347, 923, -460,
1745, -2262, 1119, 2445, -200, 25, -1892, -1465, -1012, -1193, -797, -204, 784,
2291, -382, -782, -1097, -103, -1085, -802, 521, 547, 1020, 17, -1255, -948,
1733, 352, 971, 1444, -955, -1251, -38, -844, 1473, 1673, 1071, 687, 818,
-109, -758, 1446, -1260, 507, -2043, 1091, -143, -573, 40, -705, -693, 1992,
582, -1145, 532, -176, 341, 802, 455, 858, 713, -47, -389, -137, 288,
63, -443, 499, 1048, 1202, 1044, 157, 472, -423, 135, 978, 1325, 104,
-958, 465, 564, -14, 220, -1498, 330, 158, -601, -1414, -14, -29, -535,
186, -954, -468, 2492, -1869, -1194, 345, -835, -578, -838, 802, -596, -418,
-898, 414, 91, 851, -824, -34, 193, -1276, 208, 593, 538, -175, -1954,
860, 843, -610, 617, -511, -339, -261, -1495, 833, -298, 637, -1384])
rvsdata.xar2 = array([-1271, -841, -333, 481, -422, -350, -889, -428, 1077, 1158, -89, -2142, -2073,
108, 1335, 1105, -1332, -1663, 892, 3493, 1563, -2635, -4154, -1710, 1515, 2345,
-125, -486, 426, 757, -346, 314, -222, 1476, 302, -1866, -1571, 84, 1022,
189, -1877, -876, -1912, -2325, -1025, 1024, 2600, 1539, -871, -787, -1235, 339,
1233, 2604, 1953, 721, -1234, -1307, -522, -514, -525, 183, 908, -532, -1267,
-1713, -3107, -509, 2294, 3282, 1890, 497, -502, 310, 206, 21, 720, -301,
-1644, -591, 1421, 1104, -209, -286, -1981, -1033, -2026, 1509, 3226, 2689, 1781,
1348, -1514, -1205, 353, -1682, -883, -529, 1531, 1704, 667, 180, -694, -1416,
607, 454, -71, 460, 22, -371, 2086, 2567, 1172, 719, -1157, -2347, -1052,
161, 243, 779, 266, 174, -497, 160, -729, -2110, -1661, -2324, -3568, -1843,
394, 397, -434, 689, 342, 35, 2084, 822, -133, -2765, -1154, -277, -204,
1777, 931, 1294, -13, -315, -256, -379, -1228, -327, -667, -2533, -690, 938,
302, -633, 2681, 3483, 289, -2298, -1741, 975, 1918, 592, -1329, -1176, -472,
29, 959, 1575, 1606, -423, -1031, -1095, -1693, -1649, -729, -748, -1, -709,
-1788, -1275, -926, -447, -1674, -557, -93, -2038, -1667, -1488, 345, 3705, 2396,
709, 743, 981, 269, -1063, -443, 1222, 48, -868, -438, 569, 180, 689,
-924, -1003, -148, 146, 531, 1564, 2620, 1050, 9, -3216, -1295, -1187, -725,
1016, 2190, 1911, 766, -2710, -1716, -1364, 1298, 2549, 2156, 1357, -1114, -1305,
-1844, 835, 2470, 1359, 286, 81, 1404, 157, -1388, -108, 416, 261, -636,
-685, 1076, 340, 60, 831, -170, -510, -1208, -2215, 999, 1410, 565, -272,
-561, -967, 660, 1351, -835, -1247, 256, -2040, -1321, 344, 283, 21, 72,
-1294, 1436, 2277, 876, -1185, -3327, -1474, 1622, 57, -1623, -1873, 1521, 2304,
219, -1423, 50, 244, -246, 1946, 2276, -164, -550, -1471, -2180, -1193, 2857,
3253, -1070, -971, -369, 556, 576, -1216, -2889, -2439, -195, 279, 139, -2099,
-3201, -871, 2383, 2990, 447, 258, 989, 479, -1772, -361, 50, -312, 1627,
947, 176, -360, -567, -1008, -383, 845, 1273, 147, 478, 513, 1206, 1061,
-837, -1988, 853, 2803, 4719, 2190, -804, -2626, -2234, -392, 1083, 288, 1115,
1238, 795, -1883, -2123, -2510, 395, 1737, 2869, 2179, 2827, 94, -2329, -772,
1781, 1614, -1626, -2855, -142, 1569, 1003, -705, -1781, 605, 628, 498, -1106,
-624, -1367, -1280, -38, -387, -1173, -757, 423, 856, 732, 2289, 2085, -11,
-1494, -2060, -1572, 308, 1245, 670, 525, -84, 1511, 1056, -1540, -1763, -375,
2672, 3936, 884, -1037, -785, -447, 957, 407, -1586, -2544, -477, 1314, 1986,
391, -1204, -547, 443, 2033, 628, -676, -667, -1001, -431, -537, -1894, -979,
974, 581, -382, -716, 4, 113, -926, -1185, -757, -172, 1504, 1560, 287,
-1266, -948, -864, -2485, -1593, -27, -1018, 476, 936, -456, -682, 109, 1007,
929, -382, -959, -409, -410, 357, 584, -2100, -2092, -265, 1585, 2346, 1697,
-1299, -198, 879, 1087, 172, -1276, -171, -1072, -1172, -197, 358, 2639, 384,
-1606, -1566, -516, -229, 528, 2055, 2038, 798, -876, -2463, -2000, -1127, 780,
28, 212, 523, -1080, 234, -549, -1030, 410, 64, -231, -1080, -608, -578,
611, 1897, 87, -1736, -1585, -1852, -2285, 415, 1138, 267, 1097, 125, -512,
-1097, -836, -42, -949, -1257, -219, 161, 683, 1185, 513, -1042, -2096, -333,
839, 1037, 742, 1187, 3658, 717, -917, -794, 1338, 255, 138, -828, 291,
1818, 1772, 137, -801, 396, 674, 341, -273, -729, -548, 895, 358, 582,
-172, -949, -1650, -1517, 708, 2298, -472, -2127, -1215, -1106, -385, 1550, 2094,
2035, -583, -1289, -1367, 258, 2102, 2325, 339, -790, 1122, 1697, -490, -308,
-350, -3053, -1991, 78, 1039, -541, 36, 2326, 890, 1044, 481, -425, 204,
502, 641, -62, -1041, 166, -362, -1971, -1494, -219, 729, 151, -1283, -2678,
-1204, 443, 671, 1409, 2225, 2127, 1029, 251, 1096, 606, -201, 2034, 2492,
1384, -376, 106, -615, -729, -816, -2263, -130, 1114, 2185, 2099, 490, -1779,
-1501, -1260, -1148, -377, -1248, -214, 1965, 2425, 1216, -2880, 385, 1984, 2530,
1532, 1249, 2370, 873, -107, -1544, -1620, -1870, -1200, -1853, -952, 936, 917,
-427, -1407, -542, -20, -1329, -2246, -1967, -303, 1145, 1838, 916, -393, -1841,
-883, -1178, -664, -749, 885, 518, -1523, -1687, -3280, -3711, -3372, -182, 1482,
-52, -2294, -470, 1229, 683, -1737, -1220, -317, 524, -1451, -2756, -2751, -161,
-27, -275, -142, -595, 1413, 617, 865, 1829, 103, -1938, -2586, -919, 793,
1996, 1244, -839, -2314, -1730, 252, 150, -225, -1066, 63, -476, -713, -1042,
-641, -107, 1883, 1079, 504, -944, -595, 1482, 2455, 36, -550, -132, -435,
-391, 1512, 2252, 2185, 888, -874, -2372, -549, 4330, 4652, 2136, -334, -1711,
-2035, -1979, 1419, 1657, -198, -1689, -1041, -688, -639, -279, 1182, -1146, -845,
477, 240, -178, -459, -318, 1088, 1893, 1206, -1180, -4105, -5281, -5031, -437,
2988, 3519, 1770, -273, -2682, -2968, -741, -383, -1141, -248, -535, 625, 641,
-1297, -1966, -818, 700, -584, -756, 1445, 3821, 1227, -2007, -2753, -795, 2840,
3704, 1762, -1440, -1890, -1458, -873, 881, 1929, 323, -824, -1976, -2047, 26,
2574, 1471, -1132, 221, 2024, 1584, 150, -68, -636, -3644, -1691, -1712, 268,
1610, 1334, -1398, -672, 2340, 29, -2370, -1343, 670, 1752, 1973, 1282, -1015,
-2927, -2020, -43, 787, 2103, 3111, 2122, -715, -3141, -3129, -580, -852, -2310,
-1003, -246, -208, 1288, -322, -675, -1284, -768, -1547, -2762, -3783, -722, 853,
2789, -458, -641, 2161, 1850, 424, -2477, -3659, -2700, -1523, -666, 25, 1137,
3188, 1600, -1096, -2774, -1774, -1117, -809, 432, 1297, 1841, 842, -1502, -2570,
428, 1979, 2341, 2327, -264, -2626, -2007, -1137, 1567, 3495, 3083, 1406, 401,
-491, -1351, 610, -96, 125, -1895, -487, 415, 2, -166, -838, -1281, 1387,
2332, 27, -612, -680, 103, 1224, 1383, 1353, 1104, 160, -813, -868, 0,
498, -45, 214, 1242, 2089, 2094, 787, 55, -773, -511, 956, 2346, 1502,
-929, -1029, 205, 664, 649, -1310, -1042, -21, -96, -1481, -1150, -208, -127,
189, -739, -1154, 1938, 258, -1957, -1349, -936, -653, -892, 414, 182, -480,
-1372, -444, 422, 1411, 93, -665, -386, -1252, -600, 739, 1429, 599, -2190,
-1192, 985, 774, 744, -302, -953, -872, -1716, -103, 477, 1071, -766])
|
bsd-3-clause
|
biswajitsahu/kuma
|
vendor/packages/translate/storage/html.py
|
25
|
14600
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006,2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""module for parsing html files for translation"""
import re
from six.moves import html_parser
from six.moves.html_entities import name2codepoint
from translate.storage import base
from translate.storage.base import ParseError
# Override the piclose tag from simple > to ?> otherwise we consume HTML
# within the processing instructions
html_parser.piclose = re.compile('\?>')
strip_html_re = re.compile(r'''
(?s)^ # We allow newlines, and match start of line
<(?P<tag>[^\s?>]+) # Match start of tag and the first character (not ? or >)
(?:
(?:
[^>] # Anything that's not a > is valid tag material
|
(?:<\?.*?\?>) # Matches <? foo ?> lazily; PHP is valid
)* # Repeat over valid tag material
[^?>] # If we have > 1 char, the last char can't be ? or >
)? # The repeated chars are optional, so that <a>, <p> work
> # Match ending > of opening tag
(.*) # Match actual contents of tag
</(?P=tag)> # Match ending tag; can't end with ?> and must be >=1 char
$ # Match end of line
''', re.VERBOSE)
def strip_html(text):
"""Strip unnecessary html from the text.
    HTML tags are deemed unnecessary if they fully enclose the translatable
    text, eg. '<a href="index.html">Home Page</a>'.
    HTML tags that occur within the normal flow of text will not be removed,
    eg. 'This is a link to the <a href="index.html">Home Page</a>.'
"""
text = text.strip()
# If all that is left is PHP, return ""
result = re.findall('(?s)^<\?.*?\?>$', text)
if len(result) == 1:
return ""
result = strip_html_re.findall(text)
if len(result) == 1:
text = strip_html(result[0][1])
return text
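# A hedged usage sketch (doctest-style, kept as comments so importing this
# module stays side-effect free): fully enclosing tags are stripped, while
# tags inside running text are kept.
#     >>> strip_html('<a href="index.html">Home Page</a>')
#     'Home Page'
#     >>> strip_html('This is a link to the <a href="index.html">Home Page</a>.')
#     'This is a link to the <a href="index.html">Home Page</a>.'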
normalize_re = re.compile("\s\s+")
def normalize_html(text):
"""Remove double spaces from HTML snippets"""
return normalize_re.sub(" ", text)
def safe_escape(html):
"""Escape &, < and >"""
# FIXME we need to relook at these. Escaping to cleanup htmlentity codes
# is important but we can't mix "<code><". In these cases we should
# then abort the escaping
return re.sub("&(?![a-zA-Z0-9]+;)", "&", html)
class htmlunit(base.TranslationUnit):
"""A unit of translatable/localisable HTML content"""
def __init__(self, source=None):
self.locations = []
self.setsource(source)
def getsource(self):
#TODO: Rethink how clever we should try to be with html entities.
text = self._text.replace("&", "&")
text = text.replace("\r\n", " ").replace("\n", " ").replace("\r", " ")
return text
def setsource(self, source):
self._rich_source = None
self._text = safe_escape(source)
source = property(getsource, setsource)
def addlocation(self, location):
self.locations.append(location)
def getlocations(self):
return self.locations
class htmlfile(html_parser.HTMLParser, base.TranslationStore):
UnitClass = htmlunit
MARKINGTAGS = [
"address",
"caption",
"div",
"dt", "dd",
"figcaption",
"h1", "h2", "h3", "h4", "h5", "h6",
"li",
"p",
"pre",
"title",
"th", "td",
]
"""Text in these tags that will be extracted from the HTML document"""
MARKINGATTRS = []
"""Text from tags with these attributes will be extracted from the HTML
document"""
INCLUDEATTRS = [
"alt",
"abbr",
"content",
"standby",
"summary",
"title"
]
"""Text from these attributes are extracted"""
SELF_CLOSING_TAGS = [
u"area",
u"base",
u"basefont",
u"br",
u"col",
u"frame",
u"hr",
u"img",
u"input",
u"link",
u"meta",
u"param",
]
"""HTML self-closing tags. Tags that should be specified as <img /> but
might be <img>.
`Reference <http://learnwebsitemaking.com/htmlselfclosingtags.html>`_"""
def __init__(self, includeuntaggeddata=None, inputfile=None,
callback=None):
self.units = []
self.filename = getattr(inputfile, 'name', None)
self.currentblock = u""
self.currentcomment = u""
self.currenttag = None
self.currentpos = -1
self.tag_path = []
self.filesrc = u""
self.currentsrc = u""
self.pidict = {}
if callback is None:
self.callback = self._simple_callback
else:
self.callback = callback
self.includeuntaggeddata = includeuntaggeddata
html_parser.HTMLParser.__init__(self)
if inputfile is not None:
htmlsrc = inputfile.read()
inputfile.close()
self.parse(htmlsrc)
def _simple_callback(self, string):
return string
ENCODING_RE = re.compile('''<meta.*
content.*=.*?charset.*?=\s*?
([^\s]*)
\s*?["']\s*?>
''', re.VERBOSE | re.IGNORECASE)
def guess_encoding(self, htmlsrc):
"""Returns the encoding of the html text.
We look for 'charset=' within a meta tag to do this.
"""
result = self.ENCODING_RE.findall(htmlsrc)
encoding = None
if result:
encoding = result[0]
return encoding
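    # A hedged illustration (comments only; the meta tag below is a made-up
    # example): the regex extracts the token following "charset=".
    #     >>> htmlfile().guess_encoding(
    #     ...     '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
    #     'UTF-8'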
def do_encoding(self, htmlsrc):
"""Return the html text properly encoded based on a charset."""
charset = self.guess_encoding(htmlsrc)
if charset:
return htmlsrc.decode(charset)
else:
return htmlsrc.decode('utf-8')
def pi_escape(self, text):
"""Replaces all instances of process instruction with placeholders,
and returns the new text and a dictionary of tags. The current
implementation replaces <?foo?> with <?md5(foo)?>. The hash => code
conversions are stored in self.pidict for later use in restoring the
real PHP.
The purpose of this is to remove all potential "tag-like" code from
inside PHP. The hash looks nothing like an HTML tag, but the following
PHP::
$a < $b ? $c : ($d > $e ? $f : $g)
looks like it contains an HTML tag::
< $b ? $c : ($d >
to nearly any regex. Hence, we replace all contents of PHP with simple
strings to help our regexes out.
"""
result = re.findall('(?s)<\?(.*?)\?>', text)
for pi in result:
pi_escaped = pi.replace("<", "%lt;").replace(">", "%gt;")
self.pidict[pi_escaped] = pi
text = text.replace(pi, pi_escaped)
return text
def pi_unescape(self, text):
"""Replaces the PHP placeholders in text with the real code"""
for pi_escaped, pi in self.pidict.items():
text = text.replace(pi_escaped, pi)
return text
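    # A hedged round-trip sketch (comments only; the PHP snippet is made-up):
    # pi_escape masks the angle brackets inside processing instructions so the
    # tag-matching regexes are not confused, and pi_unescape restores them.
    #     >>> h = htmlfile()
    #     >>> masked = h.pi_escape('<?php echo $a < $b ? "x" : "y"; ?>')
    #     >>> h.pi_unescape(masked)
    #     '<?php echo $a < $b ? "x" : "y"; ?>'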
def parse(self, htmlsrc):
htmlsrc = self.do_encoding(htmlsrc)
htmlsrc = self.pi_escape(htmlsrc) # Clear out the PHP before parsing
self.feed(htmlsrc)
def addhtmlblock(self, text):
text = strip_html(text)
text = self.pi_unescape(text) # Before adding anything, restore PHP
text = normalize_html(text)
if self.has_translatable_content(text):
unit = self.addsourceunit(text)
unit.addlocation("%s+%s:%d" %
(self.filename, ".".join(self.tag_path),
self.currentpos))
unit.addnote(self.currentcomment)
def has_translatable_content(self, text):
"""Check if the supplied HTML snippet has any content that needs to be
translated."""
text = text.strip()
result = re.findall('(?i).*(charset.*=.*)', text)
if len(result) == 1:
return False
# TODO: Get a better way to find untranslatable entities.
if text == ' ':
return False
pattern = '<\?.*?\?>' # Lazily strip all PHP
result = re.sub(pattern, '', text).strip()
pattern = '<[^>]*>' # Strip all HTML tags
result = re.sub(pattern, '', result).strip()
if result:
return True
else:
return False
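    # A hedged sketch of what counts as translatable (comments only): markup,
    # PHP and a bare non-breaking space are ignored, any remaining text
    # triggers extraction.
    #     >>> h = htmlfile()
    #     >>> h.has_translatable_content('<b>Hello <i>world</i></b>')
    #     True
    #     >>> h.has_translatable_content('<?php echo $x; ?>')
    #     False
    #     >>> h.has_translatable_content(' ')
    #     False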
def buildtag(self, tag, attrs=None, startend=False):
"""Create an HTML tag"""
selfclosing = u""
if startend:
selfclosing = u" /"
if attrs != [] and attrs is not None:
return u"<%(tag)s %(attrs)s%(selfclosing)s>" % \
{"tag": tag,
"attrs": " ".join(['%s="%s"' % pair for pair in attrs]),
"selfclosing": selfclosing}
else:
return u"<%(tag)s%(selfclosing)s>" % {"tag": tag,
"selfclosing": selfclosing}
#From here on below, follows the methods of the HTMLParser
def startblock(self, tag, attrs=None):
self.addhtmlblock(self.currentblock)
if self.callback(normalize_html(strip_html(self.currentsrc))):
self.filesrc += self.currentsrc.replace(strip_html(self.currentsrc),
self.callback(normalize_html(strip_html(self.currentsrc)).replace("\n", " ")))
else:
self.filesrc += self.currentsrc
self.currentblock = ""
self.currentcomment = ""
self.currenttag = tag
self.currentpos = self.getpos()[0]
self.currentsrc = self.buildtag(tag, attrs)
def endblock(self):
self.addhtmlblock(self.currentblock)
if self.callback(normalize_html(strip_html(self.currentsrc))) is not None:
self.filesrc += self.currentsrc.replace(strip_html(self.currentsrc),
self.callback(normalize_html(strip_html(self.currentsrc).replace("\n", " "))))
else:
self.filesrc += self.currentsrc
self.currentblock = ""
self.currentcomment = ""
self.currenttag = None
self.currentpos = -1
self.currentsrc = ""
def handle_starttag(self, tag, attrs):
newblock = False
if self.tag_path != [] \
and self.tag_path[-1:][0] in self.SELF_CLOSING_TAGS:
self.tag_path.pop()
self.tag_path.append(tag)
if tag in self.MARKINGTAGS:
newblock = True
for i, attr in enumerate(attrs):
attrname, attrvalue = attr
if attrname in self.MARKINGATTRS:
newblock = True
if attrname in self.INCLUDEATTRS and self.currentblock == "":
self.addhtmlblock(attrvalue)
attrs[i] = (attrname,
self.callback(normalize_html(attrvalue).replace("\n", " ")))
if newblock:
self.startblock(tag, attrs)
elif self.currenttag is not None:
self.currentblock += self.get_starttag_text()
self.currentsrc += self.get_starttag_text()
else:
self.filesrc += self.buildtag(tag, attrs)
def handle_startendtag(self, tag, attrs):
for i, attr in enumerate(attrs):
attrname, attrvalue = attr
if attrname in self.INCLUDEATTRS and self.currentblock == "":
self.addhtmlblock(attrvalue)
attrs[i] = (attrname,
self.callback(normalize_html(attrvalue).replace("\n", " ")))
if self.currenttag is not None:
self.currentblock += self.get_starttag_text()
self.currentsrc += self.get_starttag_text()
else:
self.filesrc += self.buildtag(tag, attrs, startend=True)
def handle_endtag(self, tag):
if tag == self.currenttag:
self.currentsrc += "</%(tag)s>" % {"tag": tag}
self.endblock()
elif self.currenttag is not None:
self.currentblock += '</%s>' % tag
self.currentsrc += '</%s>' % tag
else:
self.filesrc += '</%s>' % tag
try:
popped = self.tag_path.pop()
except IndexError:
raise ParseError("Mismatched tags: no more tags: line %s" %
self.getpos()[0])
while popped in self.SELF_CLOSING_TAGS:
popped = self.tag_path.pop()
if popped != tag:
raise ParseError("Mismatched closing tag: "
"expected '%s' got '%s' at line %s" %
(popped, tag, self.getpos()[0]))
def handle_data(self, data):
if self.currenttag is not None:
self.currentblock += data
self.currentsrc += self.callback(data)
elif self.includeuntaggeddata:
self.startblock(None)
self.currentblock += data
self.currentsrc += data
else:
self.filesrc += self.callback(data)
def handle_charref(self, name):
"""Handle entries in the form &#NNNN; e.g. ⃡"""
self.handle_data(unichr(int(name)))
def handle_entityref(self, name):
"""Handle named entities of the form &aaaa; e.g. ’"""
if name in ['gt', 'lt', 'amp']:
self.handle_data("&%s;" % name)
else:
self.handle_data(unichr(name2codepoint.get(name, u"&%s;" % name)))
def handle_comment(self, data):
# we can place comments above the msgid as translator comments!
if self.currentcomment == "":
self.currentcomment = data
else:
self.currentcomment += u'\n' + data
self.filesrc += "<!--%s-->" % data
def handle_pi(self, data):
self.handle_data("<?%s?>" % self.pi_unescape(data))
class POHTMLParser(htmlfile):
pass
|
mpl-2.0
|
mwcraig/scipy_proceedings
|
publisher/tempita/_looper.py
|
140
|
4161
|
"""
Helper for looping over sequences, particularly in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
... print loop.number, item
... if not loop.last:
... print '---'
1 a
---
2 b
---
3 c
"""
import sys
from tempita.compat3 import basestring_
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
if sys.version < "3":
next = __next__
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
if sys.version < "3":
next = __next__
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, basestring_)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
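# A hedged doctest-style sketch of the grouping helpers (comments only; the
# dictionaries below are made-up example rows):
#     >>> rows = [{'k': 1}, {'k': 1}, {'k': 2}]
#     >>> [loop.first_group('k') for loop, row in looper(rows)]
#     [True, False, True]
#     >>> [loop.last_group('k') for loop, row in looper(rows)]
#     [False, True, True]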
|
bsd-2-clause
|
msurkovsky/kaira
|
gui/settingswindow.py
|
10
|
25305
|
#
# Copyright (C) 2013, 2014 Martin Surkovsky
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import gobject
class SettingWidget(gtk.Table):
"""General widget for visualize and change setting parameters. It provides
methods for access to set values.
Signals:
        value-committed -- it is emitted when a widget which edits a value
                           loses the focus
"""
WARNING_COLOR = gtk.gdk.color_parse("#f66")
LABEL = 0
VALUE_LABEL = 1
VALUE = 2
def __init__(self):
gtk.Table.__init__(self, 1, 2, False)
self.set_row_spacings(5)
self.set_col_spacing(0, 10)
self.keys = []
self.setting = {} # key: label, value label, function accessing data
self.validators = {} # key: validating function
self.row = 0 # index of current row
def get(self, key):
"""Returns a whole triple (label, value label, and value).
Arguments:
key -- a unique key
"""
assert key in self.setting
return self.setting[key]()
def get_label(self, key):
assert key in self.setting
return self.setting[key]()[self.LABEL]
def get_value_label(self, key):
assert key in self.setting
return self.setting[key]()[self.VALUE_LABEL]
def get_value(self, key):
assert key in self.setting
return self.setting[key]()[self.VALUE]
def validate_value(self, key):
assert key in self.validators
return self.validators[key](self.get_value(key))
def are_values_correct(self):
return all(self.validate_value(key) is None for key in self.keys)
# -------------------------------------------------------------------------
# add general widget
def add_widget(self,
key,
label,
widget,
accessor,
validator=lambda x: None):
"""Adds a general widget which is responsible for correct setting of
its value.
Arguments:
key -- a unique key
label -- a label than will be presented in a widget
widget -- a widget which cares about correct setting of a value
        accessor -- a function which creates a triple (label, value label, and
            value). The function does not take any arguments.
Keywords:
validator -- a function which checks a value. If the value is correct
validator returns None, otherwise it returns a message containing what
is wrong (default: a function returns None).
"""
self.keys.append(key)
self.setting[key] = accessor
self.validators[key] = validator
lbl = gtk.Label(label)
lbl.set_alignment(0.0, 0.0)
self.attach(lbl,
0, 1,
self.row, self.row+1,
xoptions=gtk.FILL, yoptions=gtk.FILL,
xpadding=5)
yoptions = (gtk.EXPAND|gtk.FILL
if isinstance(widget, gtk.ScrolledWindow) else gtk.FILL)
self.attach(widget,
1, 2,
self.row, self.row+1,
xoptions=gtk.EXPAND|gtk.FILL, yoptions=yoptions,
xpadding=5)
widget.connect("focus-out-event", self._cb_focus_out, key)
self.row += 1
self.resize(self.row, 2)
def add_separator(self):
self.attach(
gtk.HSeparator(),
0, 2,
self.row, self.row+1,
xoptions=gtk.EXPAND|gtk.FILL, yoptions=0,
xpadding=5, ypadding=5)
self.row += 1
self.resize(self.row+1, 2)
def _cb_focus_out(self, widget, event, key):
self.emit("value-committed", key)
# -------------------------------------------------------------------------
# add specific components
def add_entry(self,
key,
label,
default,
validator=lambda x: None,
strToValue=lambda x: x):
""" Adds an entry component for editing parameters than can be easily
represented as a string (string, numbers, etc.).
Arguments:
key -- the unique key
label -- label than will be presented in a widget
default_value -- default value of a setting's argument
Keywords:
validator -- a function than checks values (default: a function
returns None)
strToValue -- a function converts a string representation to required
type, e.g. from string to int, etc. If the value cannot be convert
than it throws a ValueError. (default: function returns given value)
"""
        entry = gtk.Entry()
        # pre-fill the entry so that the default value is returned when the
        # user does not edit it
        entry.set_text(str(default))
def get():
return (label, None, strToValue(entry.get_text()))
self.add_widget(key, label, entry, get, validator)
def add_combobox(self, key, label, items, default=0):
"""Adds to a setting widget a combo-box where it can be selected one
of the items.
Arguments:
key -- unique key
label -- the showed label in setting widget
items -- couple: (object label, object)
default -- index of default item
"""
assert default < len(items)
store = gtk.ListStore(str, object)
for item in items:
store.append(item)
combo = gtk.ComboBox(store)
cell = gtk.CellRendererText()
combo.pack_start(cell, True)
combo.add_attribute(cell, 'text', 0)
combo.set_active(default)
def get():
vlabel, value = items[combo.get_active()]
return (label, vlabel, value)
self.add_widget(key, label, combo, get)
def add_radiobuttons(self, key, label, items, default=0, ncols=1):
"""Adds a list of radio-buttons where one of them can be selected.
Arguments:
key -- unique key
label -- the showed label in setting widget
items -- couples: (object label, object)
Keywords:
default -- the index of default value (default: first item)
ncols -- number of radio-buttons which will be put in one line.
(default: 1)
"""
assert default < len(items)
buttons = []
button = None
vbox, hbox = gtk.VBox(), gtk.HBox()
for idx, (vlabel, value) in enumerate(items):
button = gtk.RadioButton(button, vlabel)
buttons.append((button, vlabel, value))
hbox.pack_start(button, False, False, 3)
idx += 1
if idx % ncols == 0:
vbox.pack_start(hbox, False, False)
hbox = gtk.HBox()
if idx % ncols != 0: # add last unprocessed row
vbox.pack_start(hbox, False, False)
buttons[default][0].set_active(True) # activate default button
vbox.show_all()
def get():
for button, vlabel, value in buttons:
if button.get_active():
return (label, vlabel, value)
self.add_widget(key, label, vbox, get)
def add_checkbutton(self, key, label, default_value=True, button_label=""):
button = gtk.CheckButton(button_label)
button.set_active(default_value)
def get():
return (label, None, button.get_active())
self.add_widget(key, label, button, get)
def add_checkbuttons(self, key, label, items, ncols=1):
"""Adds to a setting widget a list of check-box buttons.
Arguments:
key -- unique key
label -- the showed label in setting widget
items -- tuple: (object label, object, selected)
Keywords:
ncols -- a number of check-boxes which should be in one line
(default: 1)
"""
buttons = []
vbox = gtk.VBox()
for idx, (vlabel, value, selected) in enumerate(items):
if idx % ncols == 0:
hbox = gtk.HBox()
vbox.pack_start(hbox, False, False)
button = gtk.CheckButton(vlabel)
buttons.append((button, vlabel, value))
if selected:
button.set_active(True)
hbox.pack_start(button, False, False, 3)
vbox.show_all()
def get():
values = []
vlabels = []
for button, vlabel, value in buttons:
if button.get_active():
vlabels.append(vlabel)
values.append(value)
return (label, repr(vlabels), values)
self.add_widget(key, label, vbox, get)
def add_checkbuttons_list(self, key, label, items, header):
"""Adds a list of check button in form of two columns table, where
the first one contains a name of a value and the second one contains a
check-button for select/deselect values.
        !! Broken: only reliable with a single check-box column. TODO: how should it be repaired?
Arguments:
key -- unique key
label -- the showed name in setting widget
items -- a list of tuples (label, object, checked1?, checked2?, ...)
header -- list of column names (labels, check1, check2, ...)
"""
VLABEL, VALUE, SELECTED = 0, 1, 2
def callback(crtoggle, path, store):
siter = store.get_iter(path)
select = store.get_value(siter, SELECTED)
store.set_value(siter, SELECTED, not select)
scw = gtk.ScrolledWindow()
scw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
store = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_PYOBJECT,
*((len(header) - 1) * [gobject.TYPE_BOOLEAN]))
tree_view = gtk.TreeView(store)
scw.add_with_viewport(tree_view)
# column with labels of values
text_renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn(header[VLABEL], text_renderer, text=0)
column.set_expand(True)
column.set_sort_column_id(0)
tree_view.append_column(column)
for i, title in enumerate(header[1:]):
# column for select values
bool_renderer = gtk.CellRendererToggle()
bool_renderer.set_property("activatable", True)
bool_renderer.connect("toggled", callback, store)
column = gtk.TreeViewColumn(title, bool_renderer, active=2 + i)
column.set_sort_column_id(2)
tree_view.append_column(column)
for item in items:
store.append(item)
def get():
def take_selected(model, path, iter, data):
selected = model.get_value(iter, SELECTED)
if (selected):
data[0].append(model.get_value(iter, VLABEL))
data[1].append(model.get_value(iter, VALUE))
values, vlabels = [], []
store.foreach(take_selected, (vlabels, values))
return (label, repr(vlabels), values)
self.add_widget(key, label, scw, get)
# -------------------------------------------------------------------------
# add specific data types
def add_int(self, key, label, default_value):
self.add_entry(key, label, default_value, strToValue=int)
def add_positive_int(self, key, label, default_value):
def validator(value):
if value <= 0:
return "The number must be greater than zero."
return None
self.add_entry(key, label, default_value, validator, int)
# register new signals to setting widget
gobject.type_register(SettingWidget)
""" It is emitted when the widget get the focus. """
gobject.signal_new("select-key",
SettingWidget,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
""" It is emitted when the widget lose the focus. """
gobject.signal_new("value-committed",
SettingWidget,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
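# A hedged usage sketch of SettingWidget (comments only; assumes a working GTK
# environment, and the keys/labels below are made-up):
#     sw = SettingWidget()
#     sw.add_positive_int("workers", "Number of workers", 4)
#     sw.add_checkbutton("verbose", "Verbose output", default_value=False)
#     # later, e.g. from a dialog's response handler:
#     if sw.are_values_correct():
#         workers = sw.get_value("workers")
#         verbose = sw.get_value("verbose")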
# *****************************************************************************
# Setting dialog with status-bar
class SettingPage(gtk.VBox):
def __init__(self, setting_widget=None):
gtk.VBox.__init__(self, False)
self.setting_widget = setting_widget
self.wrong_keys = [] # a list of not valid values
scw = gtk.ScrolledWindow()
scw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.sw_vbox = gtk.VBox(False)
self.sw_vbox.set_border_width(5)
# due to scrolled window sw_vbox must not be empty
# the label will be removed
self.sw_vbox.pack_start(gtk.Label())
scw.add_with_viewport(self.sw_vbox)
self.sw_vbox.show()
self.pack_start(scw, True, True)
scw.show()
self.label_msg = gtk.Label()
self.infobar = gtk.InfoBar()
self.infobar.set_message_type(gtk.MESSAGE_WARNING)
self.infobar.get_content_area().add(self.label_msg)
self.pack_start(self.infobar, False, False)
self.set_setting_widget(self.setting_widget)
def set_setting_widget(self, sw):
if sw is None:
self.remove_settig_widget()
return
for child in self.sw_vbox.get_children():
self.sw_vbox.remove(child)
self.setting_widget = sw
self.setting_widget.connect("value-committed",self._cb_check_value)
self.sw_vbox.pack_start(self.setting_widget, True, True)
self.setting_widget.show_all()
self.set_infobar()
self.show()
def remove_settig_widget(self):
if self.setting_widget is not None:
self.sw_vbox.remove(self.setting_widget)
self.setting_widget = None
self.wrong_keys = []
def are_values_correct(self):
if self.setting_widget is None:
return False # TODO: is it true? When there are no values all of them are right, aren't they?
return self.setting_widget.are_values_correct()
def set_infobar(self):
if self.are_values_correct():
self.set_wrong_message()
return
for key in self.setting_widget.keys:
msg = self.setting_widget.validate_value(key)
if msg is not None:
self.wrong_keys.append(key)
if self.wrong_keys:
key = self.wrong_keys[0]
msg = self.setting_widget.validate_value(key)
self.set_wrong_message(key, msg)
def set_wrong_message(self, key=None, message=None):
if message is None:
if key in self.wrong_keys:
self.wrong_keys.remove(key)
            if self.wrong_keys:  # check if there is another unsolved key
old_key = self.wrong_keys[0]
self.set_wrong_message(
old_key, self.setting_widget.validate_value(old_key))
return
self.label_msg.set_text("")
self.infobar.hide_all()
else:
if key not in self.wrong_keys:
self.wrong_keys.append(key)
self.label_msg.set_markup("<b>{0}:</b> {1}".format(
self.setting_widget.get_label(key), message))
self.infobar.show_all()
self.emit("values-correctness-changed", not self.wrong_keys)
def _cb_check_value(self, sw, key):
self.set_wrong_message(key, sw.validate_value(key))
# register a new signal to the setting page widget
gobject.type_register(SettingPage)
""" It is emitted when the setting page changes the wrong messages list. """
gobject.signal_new("values-correctness-changed",
SettingPage,
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
class BasicSettingDialog(gtk.Dialog):
"""Default setting dialog containing a status-bar informs about messages
got from validators. The dialog have no buttons; they must be added
manually.
"""
def __init__(self, setting_widget, title, window=None):
gtk.Dialog.__init__(self,
title=title,
parent=window,
flags=gtk.DIALOG_MODAL,
buttons=None)
self.setting_widget = setting_widget
self.protected_buttons = []
sp = SettingPage(self.setting_widget)
sp.connect("values-correctness-changed", self._cb_set_protected_buttons)
self.vbox.pack_start(sp, True, True)
self.vbox.show()
def add_button(self, button_text, response_id, protected=False):
""" Overridden version of method for adding buttons. This one has the
same behavior as the super class method. But moreover it is possible
to specify whether the button is protected or not.
Arguments:
button_text -- label of button
response_id -- response after click on it (default gtk responses)
Keywords:
        protected -- True if the button should be guarded against wrong
                     setting values; the button is locked while the setting
                     widget is not correct (default: False).
"""
button = super(BasicSettingDialog, self).add_button(button_text,
response_id)
if protected:
self.protected_buttons.append(button)
return button
def get_setting(self, key):
return self.setting_widget.get_value(key)
def _cb_set_protected_buttons(self, setting_page, are_values_correct):
for button in self.protected_buttons:
button.set_sensitive(are_values_correct)
class BasicSettingAssistant(gtk.Assistant):
def __init__(self, pages_count, title, parent=None):
assert pages_count > 0
gtk.Assistant.__init__(self)
self.get_value_functions = {}
self._response = gtk.RESPONSE_NONE
self._last_page = 0
self.set_title(title)
if parent is not None:
self.set_transient_for(parent)
self.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self.set_modal(True)
self.set_forward_page_func(self.__forward_page)
self.pages_count = pages_count
self.pages = []
self.create_functions = []
if self.pages_count == 1:
self.__add_empty_setting_page(gtk.ASSISTANT_PAGE_INTRO)
self.__add_empty_summary_page()
else:
self.__add_empty_setting_page(gtk.ASSISTANT_PAGE_INTRO)
for i in xrange(1, self.pages_count):
self.__add_empty_setting_page(gtk.ASSISTANT_PAGE_CONTENT)
self.__add_empty_summary_page()
self.connect("apply", self._cb_apply)
self.connect("cancel", self._cb_cancel)
self.connect("close", self._cb_close)
self.connect("prepare", self._cb_prepare)
def append_setting_widget(self, title, fn_create_setting_widget):
        '''All setting pages are created empty up front. When a setting widget
        is appended, a function that creates its content is registered.
        '''
sp = self.pages[len(self.create_functions)]
self.set_page_title(sp, title)
self.create_functions.append(fn_create_setting_widget)
def create_setting_widget(self, page_num, previous_setting_widget=None):
sw = self.create_functions[page_num](previous_setting_widget)
sp = self.get_nth_page(page_num)
sp.set_setting_widget(sw)
self.set_page_complete(sp, sp.are_values_correct())
def fill_summary_widget(self, smp):
smp.reset_summary_page()
for n in xrange(self.pages_count): # make a summary
sp = self.get_nth_page(n)
sw = sp.setting_widget
smp.add_page_title(self.get_page_title(sp))
for key in sw.keys:
smp.add_setting_value(*sw.get(key))
smp.show_all()
self.set_page_complete(smp, True)
def reset_pages_from(self, page_num):
for n in xrange(page_num, self.pages_count):
sp = self.get_nth_page(n)
sp.remove_settig_widget()
def run(self):
self.show_all()
gtk.main()
return self._response
def get_setting(self, key):
return self.get_value_functions[key](key)
def _cb_apply(self, bsa):
for n in xrange(bsa.pages_count):
sp = bsa.get_nth_page(n)
sw = sp.setting_widget
for key in sw.keys:
assert key not in self.get_value_functions
# store get_value function from setting widget to the key
self.get_value_functions[key] = sw.get_value
self._response = gtk.RESPONSE_APPLY
def _cb_cancel(self, bsa):
bsa.destroy()
gtk.main_quit()
self._response = gtk.RESPONSE_CANCEL
def _cb_close(self, bsa):
self.destroy()
gtk.main_quit()
def _cb_prepare(self, bsa, sp):
csp_num = bsa.get_current_page()
page_type = bsa.get_page_type(sp)
if page_type == gtk.ASSISTANT_PAGE_CONFIRM:
self.fill_summary_widget(sp)
return
if sp.setting_widget is not None:
return
if page_type == gtk.ASSISTANT_PAGE_INTRO:
bsa.create_setting_widget(csp_num)
elif page_type == gtk.ASSISTANT_PAGE_CONTENT:
previous_sp = bsa.get_nth_page(csp_num - 1)
bsa.create_setting_widget(csp_num, previous_sp.setting_widget)
def __forward_page(self, current_page):
if self._last_page > current_page:
            '''After one or more back steps it is assumed that something has
            changed, so the following pages are reset.'''
self.reset_pages_from(current_page+1)
self._last_page = current_page
page = self.get_nth_page(current_page)
if self.get_page_type(page) == gtk.ASSISTANT_PAGE_CONFIRM:
return -1
return current_page + 1
def __add_empty_setting_page(self, page_type):
sp = SettingPage()
sp.connect("values-correctness-changed", self.set_page_complete)
self.pages.append(sp)
self.append_page(sp)
self.set_page_type(sp, page_type)
def __add_empty_summary_page(self):
smp = self.SummaryPage()
self.append_page(smp)
self.set_page_type(smp, gtk.ASSISTANT_PAGE_CONFIRM)
self.set_page_title(smp, "Configuration summary")
class SummaryPage(gtk.ScrolledWindow):
def __init__(self):
gtk.ScrolledWindow.__init__(self)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.table = gtk.Table(1, 2, False)
# due to scrolled window table must not be empty
# the label will be removed
self.table.attach(gtk.Label(), 0, 1, 0,1)
self.add_with_viewport(self.table)
self.row = 0
def add_page_title(self, title):
label = gtk.Label()
label.set_markup("<b>{0}</b>".format(title))
label.set_alignment(0, 1)
self.table.attach(label,
0, 2,
self.row, self.row+1,
xoptions=gtk.FILL, yoptions=0,
xpadding=10, ypadding=10)
self.row += 1
self.table.resize(self.row, 2)
def add_setting_value(self, name, vlabel, value):
label = gtk.Label()
label.set_markup("<i>{0}:</i>".format(name))
label.set_alignment(1, 1)
self.table.attach(label,
0, 1,
self.row, self.row+1,
xoptions=gtk.FILL, yoptions=0,
xpadding=10, ypadding=3)
label = gtk.Label(repr(value) if vlabel is None else vlabel)
label.set_alignment(0, 1)
self.table.attach(label,
1, 2,
self.row, self.row+1,
xoptions=gtk.FILL, yoptions=0,
xpadding=0, ypadding=3)
self.row += 1
self.table.resize(self.row, 2)
def reset_summary_page(self):
for child in self.table.get_children():
self.table.remove(child)
self.table.resize(1, 2)
|
gpl-3.0
|
vadimtk/chrome4sdp
|
tools/telemetry/telemetry/value/list_of_string_values.py
|
13
|
4199
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import value as value_module
from telemetry.value import none_values
class ListOfStringValues(value_module.Value):
def __init__(self, page, name, units, values,
important=True, description=None,
tir_label=None, none_value_reason=None,
same_page_merge_policy=value_module.CONCATENATE):
super(ListOfStringValues, self).__init__(page, name, units, important,
description, tir_label)
if values is not None:
assert isinstance(values, list)
assert len(values) > 0
assert all(isinstance(v, basestring) for v in values)
none_values.ValidateNoneValueReason(values, none_value_reason)
self.values = values
self.none_value_reason = none_value_reason
self.same_page_merge_policy = same_page_merge_policy
def __repr__(self):
if self.page:
page_name = self.page.display_name
else:
page_name = 'None'
if self.same_page_merge_policy == value_module.CONCATENATE:
merge_policy = 'CONCATENATE'
else:
merge_policy = 'PICK_FIRST'
return ('ListOfStringValues(%s, %s, %s, %s, '
'important=%s, description=%s, tir_label=%s, '
'same_page_merge_policy=%s)') % (
page_name,
self.name,
self.units,
repr(self.values),
self.important,
self.description,
self.tir_label,
merge_policy)
def GetBuildbotDataType(self, output_context):
if self._IsImportantGivenOutputIntent(output_context):
return 'default'
return 'unimportant'
def GetBuildbotValue(self):
return self.values
def GetRepresentativeNumber(self):
return None
def GetRepresentativeString(self):
return repr(self.values)
def IsMergableWith(self, that):
return (super(ListOfStringValues, self).IsMergableWith(that) and
self.same_page_merge_policy == that.same_page_merge_policy)
@staticmethod
def GetJSONTypeName():
return 'list_of_string_values'
def AsDict(self):
d = super(ListOfStringValues, self).AsDict()
d['values'] = self.values
if self.none_value_reason is not None:
d['none_value_reason'] = self.none_value_reason
return d
@staticmethod
def FromDict(value_dict, page_dict):
kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
kwargs['values'] = value_dict['values']
if 'none_value_reason' in value_dict:
kwargs['none_value_reason'] = value_dict['none_value_reason']
if 'tir_label' in value_dict:
kwargs['tir_label'] = value_dict['tir_label']
return ListOfStringValues(**kwargs)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
assert len(values) > 0
v0 = values[0]
if v0.same_page_merge_policy == value_module.PICK_FIRST:
return ListOfStringValues(
v0.page, v0.name, v0.units,
values[0].values,
important=v0.important,
same_page_merge_policy=v0.same_page_merge_policy,
none_value_reason=v0.none_value_reason)
assert v0.same_page_merge_policy == value_module.CONCATENATE
return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label)
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values):
assert len(values) > 0
v0 = values[0]
return cls._MergeLikeValues(values, None, v0.name, v0.tir_label)
@classmethod
def _MergeLikeValues(cls, values, page, name, tir_label):
v0 = values[0]
merged_values = []
none_value_reason = None
for v in values:
if v.values is None:
merged_values = None
none_value_reason = none_values.MERGE_FAILURE_REASON
break
merged_values.extend(v.values)
return ListOfStringValues(
page, name, v0.units,
merged_values,
important=v0.important,
tir_label=tir_label,
same_page_merge_policy=v0.same_page_merge_policy,
none_value_reason=none_value_reason)
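# A hedged sketch of the merge behaviour (comments only; page=None and the
# names/units/URLs below are made-up):
#     v1 = ListOfStringValues(None, 'urls', 'url', ['http://www.foo.com/'])
#     v2 = ListOfStringValues(None, 'urls', 'url', ['http://www.bar.com/'])
#     merged = ListOfStringValues.MergeLikeValuesFromDifferentPages([v1, v2])
#     # with the default CONCATENATE policy, merged.values is
#     # ['http://www.foo.com/', 'http://www.bar.com/']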
|
bsd-3-clause
|
asidev/aybu-manager
|
aybu/manager/rest/__init__.py
|
1
|
3955
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import zmq
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from zmq.devices.basedevice import ThreadDevice
from aybu.manager.models import Base, Environment
from . authentication import AuthenticationPolicy
from . request import Request
def main(global_config, **settings):
engine = engine_from_config(settings, 'sqlalchemy.')
Base.metadata.create_all(engine)
Request.set_db_engine(engine)
authentication_policy = AuthenticationPolicy(
realm=settings['authentication.realm'])
config = Configurator(settings=settings, request_factory=Request,
default_permission='admin',
authentication_policy=authentication_policy)
config.include(includeme)
log = logging.getLogger(__name__)
log.info("Starting zmq QUEUE (%s ==> |QUEUE| ==> %s)",
settings['zmq.queue_addr'], settings['zmq.daemon_addr'])
device = ThreadDevice(zmq.QUEUE, zmq.REP, zmq.REQ)
device.bind_in(settings['zmq.queue_addr'])
device.connect_out(settings['zmq.daemon_addr'])
device.setsockopt_in(zmq.IDENTITY, 'REP')
device.setsockopt_out(zmq.IDENTITY, 'REQ')
device.start()
return config.make_wsgi_app()
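# A hedged sketch of the settings this application factory expects (comments
# only; the values are made-up, only the keys are taken from the code above):
#     [app:main]
#     sqlalchemy.url = sqlite:///aybu_manager.db
#     authentication.realm = aybu-manager
#     zmq.queue_addr = tcp://127.0.0.1:8998
#     zmq.daemon_addr = tcp://127.0.0.1:8999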
def includeme(config):
Environment.initialize(config.registry.settings, None)
config.include(add_routes)
config.add_renderer('taskresponse',
'aybu.manager.rest.renderers.TaskResponseRender')
config.add_renderer(None, 'pyramid.renderers.json_renderer_factory')
config.scan()
def add_routes(config):
aclfct = 'aybu.manager.rest.authentication.AuthenticatedFactory'
config.add_route('aliases', '/aliases', factory=aclfct)
config.add_route('alias', '/aliases/{domain}', factory=aclfct)
config.add_route('archives', '/archives', factory=aclfct)
config.add_route('archive', '/archives/{name}', factory=aclfct)
config.add_route('environments', '/environments', factory=aclfct)
config.add_route('environment', '/environments/{name}', factory=aclfct)
config.add_route('groups', '/groups', factory=aclfct)
config.add_route('group', '/groups/{name}', factory=aclfct)
config.add_route('instances', '/instances', factory=aclfct)
config.add_route('instance', '/instances/{domain}', factory=aclfct)
config.add_route('instance_groups', '/instances/{domain}/groups',
factory=aclfct)
config.add_route('instance_group', '/instances/{domain}/groups/{group}',
factory=aclfct)
config.add_route('instance_users', '/instances/{domain}/users',
factory=aclfct)
config.add_route('redirects', '/redirects', factory=aclfct)
config.add_route('redirect', '/redirects/{source}', factory=aclfct)
config.add_route('tasks', '/tasks', factory=aclfct)
config.add_route('task', '/tasks/{uuid}', factory=aclfct)
config.add_route('tasklogs', '/tasks/{uuid}/logs', factory=aclfct)
config.add_route('themes', '/themes', factory=aclfct)
config.add_route('theme', '/themes/{name}', factory=aclfct)
config.add_route('users', '/users', factory=aclfct)
config.add_route('user', '/users/{email}', factory=aclfct)
config.add_route('user_instances', '/users/{email}/instances',
factory=aclfct)
|
apache-2.0
|
edt-devel/rss2maildir
|
rss2maildir.py
|
1
|
12421
|
#!/usr/bin/env python3
"""This script downloads rss feeds and stores them in a maildir"""
# Copyright(C) 2015 Edgar Thier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import mailbox
import feedparser
import sys
import getopt
import time
import json
import getpass
import urllib.request
class defaults:
"""Contains global default values"""
maildir = os.path.expanduser("~/.mail/rss/")
config = os.path.expanduser("~/.cache/rss2maildir.json")
cache = os.path.expanduser("~/.cache/rss2mail/")
maildir_cache = os.path.expanduser("~/.mail/rss.rss2maildircache")
use_single_maildir = False
use_maildir_cache = False
mail_sender = "rss2mail"
mail_recipient = getpass.getuser() + "@localhost"
class rss_feed:
""""""
def __init__(self):
self.name = ""
self.url = ""
self.maildir = ""
self.feed = None
self.cache = None
self.xml = None
def load_config():
"""Load configuration from JSON"""
    with open(defaults.config) as config_file:
        config = json.loads(config_file.read())
if "use_single_maildir" in config["general"]:
defaults.use_single_maildir = config["general"]["use_single_maildir"]
defaults.cache = defaults.maildir_cache
if not isinstance(defaults.use_single_maildir, bool):
print("use_single_maildir has to be true or false")
exit(1)
if "use_maildir_cache" in config["general"]:
defaults.use_maildir_cache = config["general"]["use_maildir_cache"]
if not isinstance(defaults.use_maildir_cache, bool):
print("use_maildir_cache has to be true or false")
exit(1)
if "sender" in config["general"]:
defaults.mail_sender = config["general"]["sender"]
if not isinstance(defaults.mail_sender, str):
print("sender has to be a string")
exit(1)
if "recipient" in config["general"]:
defaults.mail_recipient = config["general"]["recipient"]
if not isinstance(defaults.mail_recipient, str):
print("recipient has to be a string")
exit(1)
if "cache" in config["general"]:
defaults.cache = config["general"]["cache"]
if not isinstance(defaults.cache, str):
print("cache has to be a string")
exit(1)
if "maildir" in config["general"]:
defaults.maildir = config["general"]["maildir"]
if not isinstance(defaults.cache, str):
print("maildir has to be a string")
exit(1)
feed_list = []
for single_feed in config["feeds"]:
feed = rss_feed()
feed.name = single_feed["name"]
feed.url = single_feed["url"]
if defaults.use_single_maildir:
feed.maildir = defaults.maildir
else:
feed.maildir = defaults.maildir + "." + feed.name
if 'maildir' in single_feed:
feed.maildir = single_feed["maildir"]
if not feed.name:
print("Missing feed name. Aborting...")
exit(1)
if not feed.url:
print("Missing feed url. Aborting...")
exit(2)
feed_list.append(feed)
return feed_list
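# Illustrative sketch (not part of the original script): the JSON layout that
# load_config() expects, inferred from the keys read above. The concrete values
# (addresses, paths, feed url) are hypothetical examples.
_EXAMPLE_CONFIG_JSON = """
{
    "general": {
        "use_single_maildir": false,
        "use_maildir_cache": false,
        "sender": "rss2mail",
        "recipient": "user@localhost",
        "cache": "~/.cache/rss2mail/",
        "maildir": "~/.mail/rss/"
    },
    "feeds": [
        {"name": "example", "url": "https://example.org/feed.xml"}
    ]
}
"""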
def update_maildir(maildir, rss, origin):
"""
    Creates or updates the given maildir and fills it with the message
    maildir - Maildir that shall be used
    rss - feedparser entry that shall be converted
    origin - sender string used for the 'From' header
    """
print("Writing {0}".format(rss.title))
mbox = mailbox.Maildir(maildir)
mbox.lock()
try:
msg = mailbox.MaildirMessage()
# msg.set_charset('utf-8')
if 'published' in rss:
msg.set_unixfrom('{0} Date: {1}'.format(origin, rss.published))
msg.__setitem__('Date', rss.published)
elif 'updated' in rss:
# atom feeds use '2015-05-31T19:57:15+02:00'
# python requires timezone offset to be without ':'
time_string = rss.updated
if 'Z' in time_string:
# special cases like: http://www.geeksworld.org/flux.rss.php
# do not set utc offset
# their timestamp looks like 2015-07-06T00:01:00Z
entry_time = time.strptime(time_string, '%Y-%m-%dT%H:%M:%SZ')
msg.__setitem__('Date',
time.strftime("%a, %d %b %Y %H:%M:%S %z",
entry_time))
else:
k = rss.updated.rfind(":")
time_string = time_string[:k] + time_string[k+1:]
entry_time = time.strptime(time_string, '%Y-%m-%dT%H:%M:%S%z')
msg.__setitem__('Date',
time.strftime("%a, %d %b %Y %H:%M:%S %z",
entry_time))
else:
print("no date available")
msg['From'] = origin
msg['To'] = defaults.mail_recipient
msg['Subject'] = rss.title
message_text = ""
if "link" in rss:
message_text = rss.link + "\n"
if "description" in rss:
message_text = message_text + rss.description
message = message_text
msg.set_payload(message.encode('utf-8'))
mbox.add(msg)
mbox.flush()
finally:
mbox.unlock()
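# Worked example (illustrative addition, not part of the original script) of the
# offset fix-up done in update_maildir() above: '%z' in time.strptime() does not
# accept a colon inside the UTC offset, so '+02:00' is rewritten to '+0200'
# before parsing.
def _example_parse_atom_timestamp(ts="2015-05-31T19:57:15+02:00"):
    k = ts.rfind(":")
    return time.strptime(ts[:k] + ts[k+1:], '%Y-%m-%dT%H:%M:%S%z')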
def load_cache(rss_list):
"""Load cache file and fill rss feeds with their values"""
for rss in rss_list:
filename = os.path.expanduser(defaults.cache) + "/" + rss.name
if os.path.isfile(filename):
with open(filename, 'rb') as input_file:
                # the file was opened in binary mode, so strip newlines as
                # bytes, not str
                rss.cache = feedparser.parse(input_file.read().replace(b'\n', b''))
def save_object(obj, filename):
"""Save object to given file"""
    with open(filename, 'wb') as output:
        # obj (the feed XML) is a str, so encode it before writing to the
        # binary file; load_cache() feeds the bytes straight to feedparser
        output.write(obj.encode('utf-8'))
def write_cache(rss_list):
"""
rss_list - list of rss_feed objects that should be cached
"""
if not os.path.exists(defaults.cache):
os.makedirs(defaults.cache)
for rss in rss_list:
filename = os.path.expanduser(defaults.cache) + "/" + rss.name
save_object(rss.xml, filename)
def read_mail_cache(rss_list):
"""Read cache from Maildir and fill rss_list caches where possible"""
print("Reading mail cache {0}".format(defaults.cache))
mbox = mailbox.Maildir(defaults.cache)
mbox.lock()
try:
for key, message in mbox.iteritems():
try:
byte_pickle = message.get_payload(decode=True)
            except Exception:
                print("Unable to open cache file, ignoring")
continue
for rss in rss_list:
print(" Comparing {0} to {1}".format(message['subject'],
rss.name))
if rss.name == message['subject']:
print("Found cache for {0}".format(rss.name))
rss.cache = feedparser.parse(byte_pickle)
mbox.remove(key)
mbox.flush()
break
finally:
mbox.unlock()
def clear_mail_cache():
"""Delete all mails found in cache Maildir"""
mbox = mailbox.Maildir(defaults.cache)
mbox.lock()
for key, msg in mbox.iteritems():
mbox.remove(key)
mbox.flush()
mbox.close()
def write_mail_cache(rss_list):
"""
Write elements from rss_list to Maildir
"""
    # Clearing the mail cache here would avoid duplicates, but the call is
    # currently disabled:
    # clear_mail_cache()
print("Writing mail cache {0}".format(defaults.cache))
mbox = mailbox.Maildir(defaults.cache)
mbox.lock()
try:
for f in rss_list:
print("Saving at: {0}".format(f.name))
msg = mailbox.MaildirMessage()
msg.__setitem__('Date',
time.strftime("%a, %d %b %Y %H:%M:%S %z",
time.gmtime()))
msg['From'] = defaults.mail_sender
msg['To'] = defaults.mail_recipient
msg['Subject'] = f.name
try:
msg.set_payload(f.xml.encode('utf-8'))
print("Saving mail cache for {0}".format(f.name))
mbox.add(msg)
except Exception as e:
print("Unable to create cache object for {0} -> {1}".format(f.name, e))
continue
mbox.flush()
finally:
mbox.unlock()
def extract_new_items(new_list, old_list):
"""Extract new feed entries
new_list - list from which new entries shall be extracted
    old_list - list with which new_list is compared
returns array of entries found in new_list and not in old_list
"""
has_guid = False
if not new_list:
print("Empty list!")
return []
if "id" in new_list[0]:
has_guid = True
new_entries = []
for item in new_list:
is_new = True
if has_guid:
for j in old_list:
if item.id == j.id:
is_new = False
break
else:
for j in old_list:
if item.link == j.link:
is_new = False
break
if is_new:
new_entries.append(item)
return new_entries
def download_feed(feed):
"""
feed - rss_feed object
"""
if feed.url is None:
print("No viable url found! Aborting feed...")
return False
print("Downloading '{0}'...".format(feed.url))
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent': user_agent, }
request = urllib.request.Request(feed.url, None, headers)
response = urllib.request.urlopen(request, None, timeout=10)
data = response.read()
feed.xml = data.decode('utf-8')
feed.feed = feedparser.parse(feed.xml)
if not feed.feed:
print("Unable to download {0}".format(feed.url))
return
if feed.cache is not None:
# diff the two lists and only use new entries
new_entries = extract_new_items(feed.feed.entries, feed.cache.entries)
for item in new_entries:
print(" New entry: {0}".format(item.title))
else:
# it is a new feed
new_entries = feed.feed.entries
maildir = feed.maildir
if new_entries:
for item in new_entries:
update_maildir(maildir, item, feed.feed['feed']['title'])
else:
print(" No new messages.")
def print_help():
"""Prints help text and arguments"""
print("""{0}
Download rss feeds and convert them to maildir entries.
Options:
\t-h print help text
\t-c define config to use [default: {1}]
\t-t define cache directory to use [default: {2}]
""".format(sys.argv[0],
defaults.config,
defaults.cache))
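# Illustrative invocation (not part of the original script), matching the
# options documented in print_help() above:
#   python3 rss2maildir.py -c ~/.cache/rss2maildir.json -t ~/.cache/rss2mail/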
def main(argv):
"""Entry point"""
try:
opts, args = getopt.getopt(argv,
"hc:t:",
["help", "config=", "cache="])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit()
elif opt in ("-c", "--config"):
defaults.config = arg
elif opt in ("-t", "--cache"):
defaults.cache = arg
feeds = load_config()
if defaults.use_maildir_cache:
read_mail_cache(feeds)
else:
load_cache(feeds)
for single_feed in feeds:
download_feed(single_feed)
if defaults.use_maildir_cache:
write_mail_cache(feeds)
else:
write_cache(feeds)
if __name__ == "__main__":
main(sys.argv[1:])
|
gpl-3.0
|
2014c2g14/2014c2
|
w2/static/Brython2.0.0-20140209-164925/Lib/xml/dom/expatbuilder.py
|
733
|
35733
|
"""Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The latter is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
"CDATA": minidom.TypeInfo(None, "cdata"),
"ENUM": minidom.TypeInfo(None, "enumeration"),
"ENTITY": minidom.TypeInfo(None, "entity"),
"ENTITIES": minidom.TypeInfo(None, "entities"),
"ID": minidom.TypeInfo(None, "id"),
"IDREF": minidom.TypeInfo(None, "idref"),
"IDREFS": minidom.TypeInfo(None, "idrefs"),
"NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
"""Document builder that uses Expat to build a ParsedXML.DOM document
instance."""
def __init__(self, options=None):
if options is None:
options = xmlbuilder.Options()
self._options = options
if self._options.filter is not None:
self._filter = FilterVisibilityController(self._options.filter)
else:
self._filter = None
# This *really* doesn't do anything in this case, so
# override it with something fast & minimal.
self._finish_start_element = id
self._parser = None
self.reset()
def createParser(self):
"""Create a new parser object."""
return expat.ParserCreate()
def getParser(self):
"""Return the parser object, creating a new one if needed."""
if not self._parser:
self._parser = self.createParser()
self._intern_setdefault = self._parser.intern.setdefault
self._parser.buffer_text = True
self._parser.ordered_attributes = True
self._parser.specified_attributes = True
self.install(self._parser)
return self._parser
def reset(self):
"""Free all data structures used during DOM construction."""
self.document = theDOMImplementation.createDocument(
EMPTY_NAMESPACE, None, None)
self.curNode = self.document
self._elem_info = self.document._elem_info
self._cdata = False
def install(self, parser):
"""Install the callbacks needed to build the DOM into the parser."""
# This creates circular references!
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.first_element_handler
parser.EndElementHandler = self.end_element_handler
parser.ProcessingInstructionHandler = self.pi_handler
if self._options.entities:
parser.EntityDeclHandler = self.entity_decl_handler
parser.NotationDeclHandler = self.notation_decl_handler
if self._options.comments:
parser.CommentHandler = self.comment_handler
if self._options.cdata_sections:
parser.StartCdataSectionHandler = self.start_cdata_section_handler
parser.EndCdataSectionHandler = self.end_cdata_section_handler
parser.CharacterDataHandler = self.character_data_handler_cdata
else:
parser.CharacterDataHandler = self.character_data_handler
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
parser.XmlDeclHandler = self.xml_decl_handler
parser.ElementDeclHandler = self.element_decl_handler
parser.AttlistDeclHandler = self.attlist_decl_handler
def parseFile(self, file):
"""Parse a document from a file object, returning the document
node."""
parser = self.getParser()
first_buffer = True
try:
while 1:
buffer = file.read(16*1024)
if not buffer:
break
parser.Parse(buffer, 0)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
first_buffer = False
parser.Parse("", True)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def parseString(self, string):
"""Parse a document from a string, returning the document node."""
parser = self.getParser()
try:
parser.Parse(string, True)
self._setup_subset(string)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def _setup_subset(self, buffer):
"""Load the internal subset if there might be one."""
if self.document.doctype:
extractor = InternalSubsetExtractor()
extractor.parseString(buffer)
subset = extractor.getSubset()
self.document.doctype.internalSubset = subset
def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
has_internal_subset):
doctype = self.document.implementation.createDocumentType(
doctypeName, publicId, systemId)
doctype.ownerDocument = self.document
_append_child(self.document, doctype)
self.document.doctype = doctype
if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
self.document.doctype = None
del self.document.childNodes[-1]
doctype = None
self._parser.EntityDeclHandler = None
self._parser.NotationDeclHandler = None
if has_internal_subset:
if doctype is not None:
doctype.entities._seq = []
doctype.notations._seq = []
self._parser.CommentHandler = None
self._parser.ProcessingInstructionHandler = None
self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
def end_doctype_decl_handler(self):
if self._options.comments:
self._parser.CommentHandler = self.comment_handler
self._parser.ProcessingInstructionHandler = self.pi_handler
if not (self._elem_info or self._filter):
self._finish_end_element = id
def pi_handler(self, target, data):
node = self.document.createProcessingInstruction(target, data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def character_data_handler_cdata(self, data):
childNodes = self.curNode.childNodes
if self._cdata:
if ( self._cdata_continue
and childNodes[-1].nodeType == CDATA_SECTION_NODE):
childNodes[-1].appendData(data)
return
node = self.document.createCDATASection(data)
self._cdata_continue = True
elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
value = node.data + data
node.data = value
return
else:
node = minidom.Text()
node.data = data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def character_data_handler(self, data):
childNodes = self.curNode.childNodes
if childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
node.data = node.data + data
return
node = minidom.Text()
node.data = node.data + data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def entity_decl_handler(self, entityName, is_parameter_entity, value,
base, systemId, publicId, notationName):
if is_parameter_entity:
# we don't care about parameter entities for the DOM
return
if not self._options.entities:
return
node = self.document._create_entity(entityName, publicId,
systemId, notationName)
if value is not None:
# internal entity
# node *should* be readonly, but we'll cheat
child = self.document.createTextNode(value)
node.childNodes.append(child)
self.document.doctype.entities._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
del self.document.doctype.entities._seq[-1]
def notation_decl_handler(self, notationName, base, systemId, publicId):
node = self.document._create_notation(notationName, publicId, systemId)
self.document.doctype.notations._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
del self.document.doctype.notations._seq[-1]
def comment_handler(self, data):
node = self.document.createComment(data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def start_cdata_section_handler(self):
self._cdata = True
self._cdata_continue = False
def end_cdata_section_handler(self):
self._cdata = False
self._cdata_continue = False
def external_entity_ref_handler(self, context, base, systemId, publicId):
return 1
def first_element_handler(self, name, attributes):
if self._filter is None and not self._elem_info:
self._finish_end_element = id
self.getParser().StartElementHandler = self.start_element_handler
self.start_element_handler(name, attributes)
def start_element_handler(self, name, attributes):
node = self.document.createElement(name)
_append_child(self.curNode, node)
self.curNode = node
if attributes:
for i in range(0, len(attributes), 2):
a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
None, EMPTY_PREFIX)
value = attributes[i+1]
a.value = value
a.ownerDocument = self.document
_set_attribute_node(node, a)
if node is not self.document.documentElement:
self._finish_start_element(node)
def _finish_start_element(self, node):
if self._filter:
# To be general, we'd have to call isSameNode(), but this
# is sufficient for minidom:
if node is self.document.documentElement:
return
filt = self._filter.startContainer(node)
if filt == FILTER_REJECT:
                # ignore this node & all descendants
Rejecter(self)
elif filt == FILTER_SKIP:
                # ignore this node, but make its children become
# children of the parent node
Skipper(self)
else:
return
self.curNode = node.parentNode
node.parentNode.removeChild(node)
node.unlink()
# If this ever changes, Namespaces.end_element_handler() needs to
# be changed to match.
#
def end_element_handler(self, name):
curNode = self.curNode
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
def _finish_end_element(self, curNode):
info = self._elem_info.get(curNode.tagName)
if info:
self._handle_white_text_nodes(curNode, info)
if self._filter:
if curNode is self.document.documentElement:
return
if self._filter.acceptNode(curNode) == FILTER_REJECT:
self.curNode.removeChild(curNode)
curNode.unlink()
def _handle_white_text_nodes(self, node, info):
if (self._options.whitespace_in_element_content
or not info.isElementContent()):
return
# We have element type information and should remove ignorable
        # whitespace; identify text nodes which contain only
        # whitespace.
L = []
for child in node.childNodes:
if child.nodeType == TEXT_NODE and not child.data.strip():
L.append(child)
# Remove ignorable whitespace from the tree.
for child in L:
node.removeChild(child)
def element_decl_handler(self, name, model):
info = self._elem_info.get(name)
if info is None:
self._elem_info[name] = ElementInfo(name, model)
else:
assert info._model is None
info._model = model
def attlist_decl_handler(self, elem, name, type, default, required):
info = self._elem_info.get(elem)
if info is None:
info = ElementInfo(elem)
self._elem_info[elem] = info
info._attr_info.append(
[None, name, None, None, default, 0, type, required])
def xml_decl_handler(self, version, encoding, standalone):
self.document.version = version
self.document.encoding = encoding
# This is still a little ugly, thanks to the pyexpat API. ;-(
if standalone >= 0:
if standalone:
self.document.standalone = True
else:
self.document.standalone = False
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
"""Wrapper around a DOMBuilderFilter which implements the checks
to make the whatToShow filter attribute work."""
__slots__ = 'filter',
def __init__(self, filter):
self.filter = filter
def startContainer(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.startContainer(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"startContainer() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
def acceptNode(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.acceptNode(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val == FILTER_SKIP:
# move all child nodes to the parent, and remove this node
parent = node.parentNode
for child in node.childNodes[:]:
parent.appendChild(child)
# node is handled by the caller
return FILTER_REJECT
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"acceptNode() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
_nodetype_mask = {
Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
}
class FilterCrutch(object):
__slots__ = '_builder', '_level', '_old_start', '_old_end'
def __init__(self, builder):
self._level = 0
self._builder = builder
parser = builder._parser
self._old_start = parser.StartElementHandler
self._old_end = parser.EndElementHandler
parser.StartElementHandler = self.start_element_handler
parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
__slots__ = ()
def __init__(self, builder):
FilterCrutch.__init__(self, builder)
parser = builder._parser
for name in ("ProcessingInstructionHandler",
"CommentHandler",
"CharacterDataHandler",
"StartCdataSectionHandler",
"EndCdataSectionHandler",
"ExternalEntityRefHandler",
):
setattr(parser, name, None)
def start_element_handler(self, *args):
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# restore the old handlers
parser = self._builder._parser
self._builder.install(parser)
parser.StartElementHandler = self._old_start
parser.EndElementHandler = self._old_end
else:
self._level = self._level - 1
class Skipper(FilterCrutch):
__slots__ = ()
def start_element_handler(self, *args):
node = self._builder.curNode
self._old_start(*args)
if self._builder.curNode is not node:
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# We're popping back out of the node we're skipping, so we
# shouldn't need to do anything but reset the handlers.
self._builder._parser.StartElementHandler = self._old_start
self._builder._parser.EndElementHandler = self._old_end
self._builder = None
else:
self._level = self._level - 1
self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
"""Builder which constructs document fragments given XML source
text and a context node.
The context node is expected to provide information about the
namespace declarations which are in scope at the start of the
fragment.
"""
def __init__(self, context, options=None):
if context.nodeType == DOCUMENT_NODE:
self.originalDocument = context
self.context = context
else:
self.originalDocument = context.ownerDocument
self.context = context
ExpatBuilder.__init__(self, options)
def reset(self):
ExpatBuilder.reset(self)
self.fragment = None
def parseFile(self, file):
"""Parse a document fragment from a file object, returning the
fragment node."""
return self.parseString(file.read())
def parseString(self, string):
"""Parse a document fragment from a string, returning the
fragment node."""
self._source = string
parser = self.getParser()
doctype = self.originalDocument.doctype
ident = ""
if doctype:
subset = doctype.internalSubset or self._getDeclarations()
if doctype.publicId:
ident = ('PUBLIC "%s" "%s"'
% (doctype.publicId, doctype.systemId))
elif doctype.systemId:
ident = 'SYSTEM "%s"' % doctype.systemId
else:
subset = ""
nsattrs = self._getNSattrs() # get ns decls from node's ancestors
document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
try:
parser.Parse(document, 1)
except:
self.reset()
raise
fragment = self.fragment
self.reset()
## self._parser = None
return fragment
def _getDeclarations(self):
"""Re-create the internal subset from the DocumentType node.
This is only needed if we don't already have the
internalSubset as a string.
"""
doctype = self.context.ownerDocument.doctype
s = ""
if doctype:
for i in range(doctype.notations.length):
notation = doctype.notations.item(i)
if s:
s = s + "\n "
s = "%s<!NOTATION %s" % (s, notation.nodeName)
if notation.publicId:
s = '%s PUBLIC "%s"\n "%s">' \
% (s, notation.publicId, notation.systemId)
else:
s = '%s SYSTEM "%s">' % (s, notation.systemId)
for i in range(doctype.entities.length):
entity = doctype.entities.item(i)
if s:
s = s + "\n "
s = "%s<!ENTITY %s" % (s, entity.nodeName)
if entity.publicId:
s = '%s PUBLIC "%s"\n "%s"' \
% (s, entity.publicId, entity.systemId)
elif entity.systemId:
s = '%s SYSTEM "%s"' % (s, entity.systemId)
else:
s = '%s "%s"' % (s, entity.firstChild.data)
if entity.notationName:
s = "%s NOTATION %s" % (s, entity.notationName)
s = s + ">"
return s
def _getNSattrs(self):
return ""
def external_entity_ref_handler(self, context, base, systemId, publicId):
if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
# this entref is the one that we made to put the subtree
# in; all of our given input is parsed in here.
old_document = self.document
old_cur_node = self.curNode
parser = self._parser.ExternalEntityParserCreate(context)
# put the real document back, parse into the fragment to return
self.document = self.originalDocument
self.fragment = self.document.createDocumentFragment()
self.curNode = self.fragment
try:
parser.Parse(self._source, 1)
finally:
self.curNode = old_cur_node
self.document = old_document
self._source = None
return -1
else:
return ExpatBuilder.external_entity_ref_handler(
self, context, base, systemId, publicId)
class Namespaces:
"""Mix-in class for builders; adds support for namespaces."""
def _initNamespaces(self):
# list of (prefix, uri) ns declarations. Namespace attrs are
# constructed from this and added to the element's attrs.
self._ns_ordered_prefixes = []
def createParser(self):
"""Create a new namespace-handling parser."""
parser = expat.ParserCreate(namespace_separator=" ")
parser.namespace_prefixes = True
return parser
def install(self, parser):
"""Insert the namespace-handlers onto the parser."""
ExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = (
self.start_namespace_decl_handler)
def start_namespace_decl_handler(self, prefix, uri):
"""Push this namespace declaration on our storage."""
self._ns_ordered_prefixes.append((prefix, uri))
def start_element_handler(self, name, attributes):
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
else:
uri = EMPTY_NAMESPACE
qname = name
localname = None
prefix = EMPTY_PREFIX
node = minidom.Element(qname, uri, prefix, localname)
node.ownerDocument = self.document
_append_child(self.curNode, node)
self.curNode = node
if self._ns_ordered_prefixes:
for prefix, uri in self._ns_ordered_prefixes:
if prefix:
a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
XMLNS_NAMESPACE, prefix, "xmlns")
else:
a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
"xmlns", EMPTY_PREFIX)
a.value = uri
a.ownerDocument = self.document
_set_attribute_node(node, a)
del self._ns_ordered_prefixes[:]
if attributes:
node._ensure_attributes()
_attrs = node._attrs
_attrsNS = node._attrsNS
for i in range(0, len(attributes), 2):
aname = attributes[i]
value = attributes[i+1]
if ' ' in aname:
uri, localname, prefix, qname = _parse_ns_name(self, aname)
a = minidom.Attr(qname, uri, localname, prefix)
_attrs[qname] = a
_attrsNS[(uri, localname)] = a
else:
a = minidom.Attr(aname, EMPTY_NAMESPACE,
aname, EMPTY_PREFIX)
_attrs[aname] = a
_attrsNS[(EMPTY_NAMESPACE, aname)] = a
a.ownerDocument = self.document
a.value = value
a.ownerElement = node
if __debug__:
# This only adds some asserts to the original
# end_element_handler(), so we only define this when -O is not
# used. If changing one, be sure to check the other to see if
# it needs to be changed as well.
#
def end_element_handler(self, name):
curNode = self.curNode
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
assert (curNode.namespaceURI == uri
and curNode.localName == localname
and curNode.prefix == prefix), \
"element stack messed up! (namespace)"
else:
assert curNode.nodeName == name, \
"element stack messed up - bad nodeName"
assert curNode.namespaceURI == EMPTY_NAMESPACE, \
"element stack messed up - bad namespaceURI"
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
"""Document builder that supports namespaces."""
def reset(self):
ExpatBuilder.reset(self)
self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
"""Fragment builder that supports namespaces."""
def reset(self):
FragmentBuilder.reset(self)
self._initNamespaces()
def _getNSattrs(self):
"""Return string of namespace attributes from this element and
ancestors."""
# XXX This needs to be re-written to walk the ancestors of the
# context to build up the namespace information from
# declarations, elements, and attributes found in context.
# Otherwise we have to store a bunch more data on the DOM
# (though that *might* be more reliable -- not clear).
attrs = ""
context = self.context
L = []
while context:
if hasattr(context, '_ns_prefix_uri'):
for prefix, uri in context._ns_prefix_uri.items():
# add every new NS decl from context to L and attrs string
if prefix in L:
continue
L.append(prefix)
if prefix:
declname = "xmlns:" + prefix
else:
declname = "xmlns"
if attrs:
attrs = "%s\n %s='%s'" % (attrs, declname, uri)
else:
attrs = " %s='%s'" % (declname, uri)
context = context.parentNode
return attrs
class ParseEscape(Exception):
"""Exception raised to short-circuit parsing in InternalSubsetExtractor."""
pass
class InternalSubsetExtractor(ExpatBuilder):
"""XML processor which can rip out the internal document type subset."""
subset = None
def getSubset(self):
"""Return the internal subset as a string."""
return self.subset
def parseFile(self, file):
try:
ExpatBuilder.parseFile(self, file)
except ParseEscape:
pass
def parseString(self, string):
try:
ExpatBuilder.parseString(self, string)
except ParseEscape:
pass
def install(self, parser):
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.start_element_handler
def start_doctype_decl_handler(self, name, publicId, systemId,
has_internal_subset):
if has_internal_subset:
parser = self.getParser()
self.subset = []
parser.DefaultHandler = self.subset.append
parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
else:
raise ParseEscape()
def end_doctype_decl_handler(self):
s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
self.subset = s
raise ParseEscape()
def start_element_handler(self, name, attrs):
raise ParseEscape()
def parse(file, namespaces=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseString(string, namespaces=True):
"""Parse a document from a string, returning the resulting
Document node.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
"""Parse a fragment of a document, given the context from which it
was originally extracted. context should be the parent of the
node(s) which are in the fragment.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseFragmentString(string, context, namespaces=True):
"""Parse a fragment of a document from a string, given the context
from which it was originally extracted. context should be the
parent of the node(s) which are in the fragment.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
return builder.parseString(string)
def makeBuilder(options):
"""Create a builder based on an Options object."""
if options.namespaces:
return ExpatBuilderNS(options)
else:
return ExpatBuilder(options)
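# Illustrative usage sketch (not part of the original module): the module-level
# helpers above build minidom documents directly through Expat.
def _example_usage():
    doc = parseString("<root a='1'><child>text</child></root>")
    return doc.documentElement.tagName  # 'root'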
|
gpl-2.0
|
mrbox/django
|
tests/expressions_case/tests.py
|
4
|
50271
|
from __future__ import unicode_literals
import unittest
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from operator import attrgetter, itemgetter
from uuid import UUID
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models import F, Q, Max, Min, Value
from django.db.models.expressions import Case, When
from django.test import TestCase
from django.utils import six
from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel
try:
from PIL import Image
except ImportError:
Image = None
class CaseExpressionTests(TestCase):
@classmethod
def setUpTestData(cls):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string='4')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
# GROUP BY on Oracle fails with TextField/BinaryField; see #24096.
cls.non_lob_fields = [
f.name for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created) and not isinstance(f, (models.BinaryField, models.TextField))
]
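    # Illustrative sketch (not part of the original tests): an annotation such
    # as the one in test_annotate() below compiles to SQL roughly of the form
    #   CASE WHEN "integer" = 1 THEN 'one'
    #        WHEN "integer" = 2 THEN 'two'
    #        ELSE 'other' END AS "test"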
def test_annotate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=1),
When(integer=2, then=2),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer=1, then=F('o2o_rel__integer') + 1),
When(integer=2, then=F('o2o_rel__integer') + 3),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer2=F('o2o_rel__integer'), then=Value('equal')),
When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
f_plus_3=F('integer') + 3,
).annotate(
f_test=Case(
When(integer=1, then='f_plus_1'),
When(integer=2, then='f_plus_3'),
default='integer',
),
).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).annotate(
f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('f_plus_1'), then=Value('+1')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F('integer') - 2,
).annotate(
test=Case(
When(f_minus_2=-1, then=Value('negative one')),
When(f_minus_2=0, then=Value('zero')),
When(f_minus_2=1, then=Value('one')),
default=Value('other'),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)],
transform=itemgetter('integer', 'test', 'min', 'max')
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer2=F('min'), then=Value('min')),
When(integer2=F('max'), then=Value('max')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')],
transform=itemgetter('integer', 'integer2', 'test')
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(max=3, then=Value('max = 3')),
When(max=4, then=Value('max = 4')),
default=Value(''),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'),
(3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')],
transform=itemgetter('integer', 'max', 'test')
)
def test_annotate_exclude(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
output_field=models.CharField(),
)).exclude(test='other').order_by('pk'),
[(1, 'one'), (2, 'two'), (2, 'two')],
transform=attrgetter('integer', 'test')
)
def test_annotate_values_not_in_order_by(self):
self.assertEqual(
list(CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
When(integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('test').values_list('integer', flat=True)),
[1, 4, 3, 3, 3, 2, 2]
)
def test_combined_expression(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
) + 1,
).order_by('pk'),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter('integer', 'test')
)
if connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 7, 0):
# There is a bug in sqlite < 3.7.0, where placeholder order is lost.
# Thus, the above query returns <condition_value> + <result_value>
# for each matching case instead of <result_value> + 1 (#24148).
test_combined_expression = unittest.expectedFailure(test_combined_expression)
def test_in_subquery(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F('integer2'), then='pk'),
When(integer=4, then='pk'),
output_field=models.IntegerField(),
),
).values('test')).order_by('pk'),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter('integer', 'integer2')
)
def test_case_reuse(self):
SOME_CASE = Case(
When(pk=0, then=Value('0')),
default=Value('1'),
output_field=models.CharField(),
)
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk'),
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk').values_list('pk', 'somecase'),
lambda x: (x.pk, x.somecase)
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(
When(integer=1, then=1),
output_field=models.IntegerField(),
)),
two=models.Sum(Case(
When(integer=2, then=1),
output_field=models.IntegerField(),
)),
three=models.Sum(Case(
When(integer=3, then=1),
output_field=models.IntegerField(),
)),
four=models.Sum(Case(
When(integer=4, then=1),
output_field=models.IntegerField(),
)),
),
{'one': 1, 'two': 2, 'three': 3, 'four': 1}
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(When(integer=1, then='integer'))),
two=models.Sum(Case(When(integer=2, then=F('integer') - 1))),
three=models.Sum(Case(When(integer=3, then=F('integer') + 1))),
),
{'one': 1, 'two': 2, 'three': 12}
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=models.Sum(Case(
When(integer2=F('integer'), then=1),
output_field=models.IntegerField(),
)),
plus_one=models.Sum(Case(
When(integer2=F('integer') + 1, then=1),
output_field=models.IntegerField(),
)),
),
{'equal': 3, 'plus_one': 4}
)
def test_filter(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
default=1,
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('integer') + 1),
When(integer=3, then=F('integer')),
default='integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string=Case(
When(integer2=F('integer'), then=Value('2')),
When(integer2=F('integer') + 1, then=Value('3')),
output_field=models.CharField(),
)).order_by('pk'),
[(3, 4, '3'), (2, 2, '2'), (3, 4, '3')],
transform=attrgetter('integer', 'integer2', 'string')
)
def test_filter_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('o2o_rel__integer') + 1),
When(integer=3, then=F('o2o_rel__integer')),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(o2o_rel__integer=1, then=1),
When(o2o_rel__integer=2, then=3),
When(o2o_rel__integer=3, then=4),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f=F('integer'),
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(integer=2, then='f_plus_1'),
When(integer=3, then='f'),
),
).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer=Case(
When(integer2=F('integer'), then=2),
When(integer2=F('f_plus_1'), then=3),
output_field=models.IntegerField(),
),
).order_by('pk'),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(f_plus_1=3, then=3),
When(f_plus_1=4, then=4),
default=1,
output_field=models.IntegerField(),
),
).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer2=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(integer2=F('min'), then=2),
When(integer2=F('max'), then=3),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(max=3, then=2),
When(max=4, then=3),
),
).order_by('pk'),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter('integer', 'integer2', 'max')
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'string')
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'integer2')
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)],
transform=attrgetter('string', 'integer')
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'string')
)
def test_update_with_join_in_condition_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
output_field=models.IntegerField(),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'big_integer')
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
# fails on postgresql on Python 2.7 if output_field is not
# set explicitly
When(integer=1, then=Value(b'one', output_field=models.BinaryField())),
When(integer=2, then=Value(b'two', output_field=models.BinaryField())),
default=Value(b'', output_field=models.BinaryField()),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')],
transform=lambda o: (o.integer, six.binary_type(o.binary))
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=True),
When(integer=2, then=True),
default=False,
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)],
transform=attrgetter('integer', 'boolean')
)
def test_update_comma_separated_integer(self):
CaseTestModel.objects.update(
comma_separated_integer=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2,2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2,2'), (3, ''), (2, '2,2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'comma_separated_integer')
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=date(2015, 1, 1)),
When(integer=2, then=date(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date')
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=datetime(2015, 1, 1)),
When(integer=2, then=datetime(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date_time')
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Decimal('1.1')),
When(integer=2, then=Decimal('2.2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, Decimal('1.1')),
(2, Decimal('2.2')),
(3, None),
(2, Decimal('2.2')),
(3, None),
(3, None),
(4, None)
],
transform=attrgetter('integer', 'decimal')
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing timedeltas
When(integer=1, then=Value(timedelta(1), output_field=models.DurationField())),
When(integer=2, then=Value(timedelta(2), output_field=models.DurationField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'duration')
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value('[email protected]')),
When(integer=2, then=Value('[email protected]')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '[email protected]'), (2, '[email protected]'), (3, ''), (2, '[email protected]'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'email')
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.file))
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'file_path')
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=1.1),
When(integer=2, then=2.2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'float')
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.image))
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
# fails on postgresql if output_field is not set explicitly
When(integer=1, then=Value('1.1.1.1')),
When(integer=2, then=Value('2.2.2.2')),
output_field=models.GenericIPAddressField(),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'generic_ip_address')
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'null_boolean')
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_integer')
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_small_integer')
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'slug')
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'small_integer')
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=['1', '2']).update(
string=Case(
When(integer=1, then=Value('1', output_field=models.CharField())),
When(integer=2, then=Value('2', output_field=models.CharField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'),
[(1, '1'), (2, '2'), (2, '2')],
transform=attrgetter('integer', 'string')
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'text')
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing times
When(integer=1, then=Value(time(1), output_field=models.TimeField())),
When(integer=2, then=Value(time(2), output_field=models.TimeField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'time')
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value('http://1.example.com/')),
When(integer=2, then=Value('http://2.example.com/')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'),
(3, ''), (3, ''), (4, '')
],
transform=attrgetter('integer', 'url')
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing UUIDs
When(integer=1, then=Value(
UUID('11111111111111111111111111111111'),
output_field=models.UUIDField(),
)),
When(integer=2, then=Value(
UUID('22222222222222222222222222222222'),
output_field=models.UUIDField(),
)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, UUID('11111111111111111111111111111111')),
(2, UUID('22222222222222222222222222222222')),
(3, None),
(2, UUID('22222222222222222222222222222222')),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter('integer', 'uuid')
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=obj1.pk),
When(integer=2, then=obj2.pk),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'fk_id')
)
def test_lookup_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value('less than 2')),
When(integer__gt=2, then=Value('greater than 2')),
default=Value('equal to 2'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'),
(3, 'greater than 2'), (4, 'greater than 2')
],
transform=attrgetter('integer', 'test')
)
def test_lookup_different_fields(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'),
(3, 3, 'default'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_combined_q_object(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'),
(3, 3, 'when'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_order_by_conditional_implicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
)).order_by('test', 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_order_by_conditional_explicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
)).order_by(F('test').asc(), 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_join_promotion(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
output_field=models.IntegerField()
),
),
[(o, 3)],
lambda x: (x, x.foo)
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
output_field=models.IntegerField()
),
),
[(o, 2)],
lambda x: (x, x.foo)
)
def test_join_promotion_multiple_annotations(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
output_field=models.IntegerField()
),
bar=Case(
When(fk_rel__pk=1, then=4),
default=5,
output_field=models.IntegerField()
),
),
[(o, 3, 5)],
lambda x: (x, x.foo, x.bar)
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
output_field=models.IntegerField()
),
bar=Case(
When(fk_rel__isnull=True, then=4),
default=5,
output_field=models.IntegerField()
),
),
[(o, 2, 4)],
lambda x: (x, x.foo, x.bar)
)
def test_m2m_exclude(self):
CaseTestModel.objects.create(integer=10, integer2=1, string='1')
qs = CaseTestModel.objects.values_list('id', 'integer').annotate(
cnt=models.Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
output_field=models.IntegerField()
),
).order_by('integer')
# The first o has 2 as its fk_rel__integer=1, thus it hits the
# default=2 case. The other ones have 2 as the result as they have 2
# fk_rel objects, except for integer=4 and integer=10 (created above).
# The integer=4 case has one integer, thus the result is 1, and
# integer=10 doesn't have any and this too generates 1 (instead of 0)
# as ~Q() also matches nulls.
self.assertQuerysetEqual(
qs,
[(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)],
lambda x: x[1:]
)
def test_m2m_reuse(self):
CaseTestModel.objects.create(integer=10, integer2=1, string='1')
# Need to use values before annotate so that Oracle will not group
# by fields it isn't capable of grouping by.
qs = CaseTestModel.objects.values_list('id', 'integer').annotate(
cnt=models.Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
output_field=models.IntegerField()
),
).annotate(
cnt2=models.Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
output_field=models.IntegerField()
),
).order_by('integer')
self.assertEqual(str(qs.query).count(' JOIN '), 1)
self.assertQuerysetEqual(
qs,
[(1, 2, 2), (2, 2, 2), (2, 2, 2), (3, 2, 2), (3, 2, 2), (3, 2, 2), (4, 1, 1), (10, 1, 1)],
lambda x: x[1:]
)
class CaseDocumentationExamples(TestCase):
@classmethod
def setUpTestData(cls):
Client.objects.create(
name='Jane Doe',
account_type=Client.REGULAR,
registered_on=date.today() - timedelta(days=36),
)
Client.objects.create(
name='James Smith',
account_type=Client.GOLD,
registered_on=date.today() - timedelta(days=5),
)
Client.objects.create(
name='Jack Black',
account_type=Client.PLATINUM,
registered_on=date.today() - timedelta(days=10 * 365),
)
def test_simple_example(self):
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(account_type=Client.GOLD, then=Value('5%')),
When(account_type=Client.PLATINUM, then=Value('10%')),
default=Value('0%'),
output_field=models.CharField(),
),
).order_by('pk'),
[('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_lookup_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(registered_on__lte=a_year_ago, then=Value('10%')),
When(registered_on__lte=a_month_ago, then=Value('5%')),
default=Value('0%'),
output_field=models.CharField(),
),
).order_by('pk'),
[('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_conditional_update_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
Client.objects.update(
account_type=Case(
When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)),
When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)),
default=Value(Client.REGULAR),
),
)
self.assertQuerysetEqual(
Client.objects.all().order_by('pk'),
[('Jane Doe', 'G'), ('James Smith', 'R'), ('Jack Black', 'P')],
transform=attrgetter('name', 'account_type')
)
def test_conditional_aggregation_example(self):
Client.objects.create(
name='Jean Grey',
account_type=Client.REGULAR,
registered_on=date.today(),
)
Client.objects.create(
name='James Bond',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
Client.objects.create(
name='Jane Porter',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
self.assertEqual(
Client.objects.aggregate(
regular=models.Sum(Case(
When(account_type=Client.REGULAR, then=1),
output_field=models.IntegerField(),
)),
gold=models.Sum(Case(
When(account_type=Client.GOLD, then=1),
output_field=models.IntegerField(),
)),
platinum=models.Sum(Case(
When(account_type=Client.PLATINUM, then=1),
output_field=models.IntegerField(),
)),
),
{'regular': 2, 'gold': 1, 'platinum': 3}
)
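# Illustrative sketch, not part of the original test module: the same
# conditional-aggregation idea also works with Count, because Count skips the
# NULLs produced when no When() clause matches. The class below is
# hypothetical and assumes the module-level imports used by the tests above.
class CaseConditionalCountExample(TestCase):
    @classmethod
    def setUpTestData(cls):
        Client.objects.create(
            name='Jane Doe',
            account_type=Client.REGULAR,
            registered_on=date.today(),
        )
        Client.objects.create(
            name='James Smith',
            account_type=Client.GOLD,
            registered_on=date.today(),
        )
    def test_conditional_count(self):
        self.assertEqual(
            Client.objects.aggregate(
                gold=models.Count(Case(
                    When(account_type=Client.GOLD, then=1),
                    output_field=models.IntegerField(),
                )),
            ),
            {'gold': 1}
        )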
|
bsd-3-clause
|
hoogenm/compose
|
compose/config/sort_services.py
|
7
|
2486
|
from __future__ import absolute_import
from __future__ import unicode_literals
from compose.config.errors import DependencyError
def get_service_name_from_network_mode(network_mode):
return get_source_name_from_network_mode(network_mode, 'service')
def get_container_name_from_network_mode(network_mode):
return get_source_name_from_network_mode(network_mode, 'container')
def get_source_name_from_network_mode(network_mode, source_type):
if not network_mode:
return
if not network_mode.startswith(source_type+':'):
return
_, net_name = network_mode.split(':', 1)
return net_name
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_names_from_volumes_from(volumes_from):
return [volume_from.source for volume_from in volumes_from]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
name == get_service_name_from_network_mode(service.get('network_mode')) or
name == get_service_name_from_network_mode(service.get('pid')) or
name in service.get('depends_on', []))
]
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
if n['name'] in n.get('depends_on', []):
raise DependencyError('A service can not depend on itself: %s' % n['name'])
raise DependencyError('Circular dependency between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
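# Illustrative usage sketch, not part of the original module; the service
# names below are made up. sort_service_dicts() places a service's
# dependencies (links, depends_on, etc.) before the service itself.
if __name__ == '__main__':
    _services = [
        {'name': 'web', 'depends_on': ['db'], 'links': ['cache:cache']},
        {'name': 'cache'},
        {'name': 'db'},
    ]
    _ordered = [s['name'] for s in sort_service_dicts(_services)]
    assert _ordered.index('db') < _ordered.index('web')
    assert _ordered.index('cache') < _ordered.index('web')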
|
apache-2.0
|
lz1988/company-site
|
django/db/backends/mysql/introspection.py
|
106
|
4528
|
import re
from .base import FIELD_TYPE
from django.db.backends import BaseDatabaseIntrospection
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'DecimalField',
FIELD_TYPE.NEWDECIMAL: 'DecimalField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'BigIntegerField',
FIELD_TYPE.SHORT: 'IntegerField',
FIELD_TYPE.STRING: 'CharField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("SHOW TABLES")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
        Returns a description of the table, with the DB-API cursor.description interface.
"""
# varchar length returned by cursor.description is an internal length,
# not visible length (#5725), use information_schema database to fix this
cursor.execute("""
SELECT column_name, character_maximum_length FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()
AND character_maximum_length IS NOT NULL""", [table_name])
length_map = dict(cursor.fetchall())
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [line[:3] + (length_map.get(line[0], line[3]),) + line[4:]
for line in cursor.description]
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
my_field_dict = self._name_to_index(cursor, table_name)
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
other_field_index = self._name_to_index(cursor, other_table)[other_field]
my_field_index = my_field_dict[my_fieldname]
relations[my_field_index] = (other_field_index, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
return indexes
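# Illustrative usage sketch, not part of the original module. In a configured
# Django project using this backend, the methods above are reached through the
# connection's introspection object; the table name is hypothetical:
#
#     from django.db import connection
#
#     cursor = connection.cursor()
#     tables = connection.introspection.get_table_list(cursor)
#     if 'auth_user' in tables:
#         description = connection.introspection.get_table_description(cursor, 'auth_user')
#         indexes = connection.introspection.get_indexes(cursor, 'auth_user')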
|
bsd-3-clause
|
justathoughtor2/atomicApe
|
cygwin/lib/python2.7/distutils/dir_util.py
|
46
|
7868
|
"""distutils.dir_util
Utility functions for manipulating directories and directory trees."""
__revision__ = "$Id$"
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache used by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0777, verbose=1, dry_run=0):
"""Create a directory and any missing ancestor directories.
If the directory already exists (or if 'name' is the empty string, which
means the current directory, which of course exists), then do nothing.
Raise DistutilsFileError if unable to create some directory along the way
(eg. some sub-path exists, but is a file rather than a directory).
If 'verbose' is true, print a one-line summary of each mkdir to stdout.
Return the list of directories actually created.
"""
global _path_created
# Detect a common bug -- name is None
if not isinstance(name, basestring):
raise DistutilsInternalError, \
"mkpath: 'name' must be a string (got %r)" % (name,)
# XXX what's the better way to handle verbosity? print as we create
# each directory in the path (the current behaviour), or only announce
# the creation of the whole path? (quite easy to do the latter since
# we're not using a recursive algorithm)
name = os.path.normpath(name)
created_dirs = []
if os.path.isdir(name) or name == '':
return created_dirs
if _path_created.get(os.path.abspath(name)):
return created_dirs
(head, tail) = os.path.split(name)
tails = [tail] # stack of lone dirs to create
while head and tail and not os.path.isdir(head):
(head, tail) = os.path.split(head)
tails.insert(0, tail) # push next higher dir onto stack
# now 'head' contains the deepest directory that already exists
# (that is, the child of 'head' in 'name' is the highest directory
# that does *not* exist)
for d in tails:
#print "head = %s, d = %s: " % (head, d),
head = os.path.join(head, d)
abs_head = os.path.abspath(head)
if _path_created.get(abs_head):
continue
if verbose >= 1:
log.info("creating %s", head)
if not dry_run:
try:
os.mkdir(head, mode)
except OSError, exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
raise DistutilsFileError(
"could not create '%s': %s" % (head, exc.args[-1]))
created_dirs.append(head)
_path_created[abs_head] = 1
return created_dirs
def create_tree(base_dir, files, mode=0777, verbose=1, dry_run=0):
"""Create all the empty directories under 'base_dir' needed to put 'files'
there.
'base_dir' is just the name of a directory which doesn't necessarily
exist yet; 'files' is a list of filenames to be interpreted relative to
'base_dir'. 'base_dir' + the directory portion of every file in 'files'
will be created if it doesn't already exist. 'mode', 'verbose' and
'dry_run' flags are as for 'mkpath()'.
"""
# First get the list of directories to create
need_dir = {}
for file in files:
need_dir[os.path.join(base_dir, os.path.dirname(file))] = 1
need_dirs = need_dir.keys()
need_dirs.sort()
# Now create them
for dir in need_dirs:
mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
preserve_symlinks=0, update=0, verbose=1, dry_run=0):
"""Copy an entire directory tree 'src' to a new location 'dst'.
Both 'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
from distutils.file_util import copy_file
if not dry_run and not os.path.isdir(src):
raise DistutilsFileError, \
"cannot copy tree '%s': not a directory" % src
try:
names = os.listdir(src)
except os.error, (errno, errstr):
if dry_run:
names = []
else:
raise DistutilsFileError, \
"error listing files in '%s': %s" % (src, errstr)
if not dry_run:
mkpath(dst, verbose=verbose)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if n.startswith('.nfs'):
# skip NFS rename files
continue
if preserve_symlinks and os.path.islink(src_name):
link_dest = os.readlink(src_name)
if verbose >= 1:
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os.path.isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
verbose=verbose, dry_run=dry_run))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, verbose=verbose,
dry_run=dry_run)
outputs.append(dst_name)
return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
"""Recursively remove an entire directory tree.
Any errors are ignored (apart from being reported to stdout if 'verbose'
is true).
"""
global _path_created
if verbose >= 1:
log.info("removing '%s' (and everything under it)", directory)
if dry_run:
return
cmdtuples = []
_build_cmdtuple(directory, cmdtuples)
for cmd in cmdtuples:
try:
cmd[0](cmd[1])
# remove dir from cache if it's already there
abspath = os.path.abspath(cmd[1])
if abspath in _path_created:
del _path_created[abspath]
except (IOError, OSError), exc:
log.warn("error removing %s: %s", directory, exc)
def ensure_relative(path):
"""Take the full path 'path', and make it a relative path.
This is useful to make 'path' the second argument to os.path.join().
"""
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
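# Illustrative usage sketch, not part of the original module; the /tmp paths
# below are made up. Running the module directly exercises the helpers above.
if __name__ == '__main__':
    _demo_root = os.path.join('/tmp', 'dir_util_demo')
    # mkpath returns every directory it actually created, deepest last
    _created = mkpath(os.path.join(_demo_root, 'a', 'b', 'c'))
    # copy_tree returns the names of the files (and symlinks) it copied
    _copied = copy_tree(os.path.join(_demo_root, 'a'),
                        os.path.join(_demo_root, 'a_copy'), verbose=0)
    # remove_tree ignores errors apart from logging them
    remove_tree(_demo_root, verbose=0)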
|
gpl-3.0
|
ucsd-ccbb/jupyter-genomics
|
src/microbiome/test/test_exec_bowtie.py
|
1
|
19830
|
""" Module to test exec_bowtie member methods.
This module contains unit tests for exec_bowtie.py.
"""
import os
import unittest
import shutil
from src import exec_bowtie
__author__ = "YiDing Fang"
__maintainer__ = "YiDing Fang"
__email__ = "[email protected]"
__status__ = "prototype"
# input file contents. For future use.
_TRIM_R1_FASTQ_STR = """"""
_TRIM_R2_FASTQ_STR = """"""
_BOWTIE_PATH = '/usr/local/bowtie2-2.2.9/bowtie2'
class TestExecBowtie(unittest.TestCase):
""" Unit test exec_megahit methods """
def setup_path(self):
""" create strings corresponding to the temporary directories and files to be used in unit tests """
# build executable extension
_BUILD_EXT = '-build'
# input directories
_EXAMPLE_DIR = 'example'
_INDEX_DIR = 'index'
_REFERENCE_DIR = 'reference'
_READS_DIR = 'reads'
# reference files base name
_REFERENCE_FA_STR = 'lambda_virus.fa'
# base name for the sample scaffold files found in ./bowtie
_SCAFFOLD_BASE_STR = 'lambda_virus'
# input reads
_SAMPLE_1_FQ_STR = 'reads_1.fq'
_SAMPLE_2_FQ_STR = 'reads_2.fq'
# output file name
_OUTPUT_SAM_STR = 'lambda_virus_sample.sam'
# temporary directories to be used
_UNITTEST_DIR_STR = 'bowtie_unittest_temp_dir'
_OUTPUT_DIR_STR = 'output'
_INPUT_DIR_STR = 'input'
# full file paths
self.bowtie_path = _BOWTIE_PATH
self.bowtie_build_path = self.bowtie_path + _BUILD_EXT
# full file paths
self.unittest_dir = _UNITTEST_DIR_STR
self.output_dir = os.path.join(_UNITTEST_DIR_STR, _OUTPUT_DIR_STR)
self.input_dir = os.path.join(_UNITTEST_DIR_STR, _INPUT_DIR_STR)
# output index directory and index bt2 files
self.output_index_dir = os.path.join(self.output_dir, _INDEX_DIR)
self.output_scaffold_index = os.path.join(self.output_index_dir, _SCAFFOLD_BASE_STR)
# output sam file
self.output_sam = os.path.join(self.output_dir, _OUTPUT_SAM_STR)
bowtie_dir, executable = os.path.split(_BOWTIE_PATH)
bowtie_example_dir = os.path.join(bowtie_dir, _EXAMPLE_DIR)
bowtie_index_dir = os.path.join(bowtie_example_dir, _INDEX_DIR)
bowtie_reference_dir = os.path.join(bowtie_example_dir, _REFERENCE_DIR)
bowtie_reads_dir = os.path.join(bowtie_example_dir, _READS_DIR)
self.index_dir = bowtie_index_dir
self.scaffold_index = os.path.join(bowtie_index_dir, _SCAFFOLD_BASE_STR)
self.sample_reference_fa = os.path.join(bowtie_reference_dir, _REFERENCE_FA_STR)
self.sample_fq_1 = os.path.join(bowtie_reads_dir, _SAMPLE_1_FQ_STR)
self.sample_fq_2 = os.path.join(bowtie_reads_dir, _SAMPLE_2_FQ_STR)
# TODO: Check if the OSError is thrown in case we remove something improperly
def clear_dir(self, target_dir):
""" Selectively remove files in a directory using the given file extension names """
# output file extensions
_OUT_EXT = ['.fq', '.fastq', '.fa', '.fasta', '.txt', '.lib', '.bin', '.info', '.lib_info',
'.log', '.tex', '.txt', '.tsv', '.pdf', '.sam', '.bt2']
if os.path.exists(target_dir):
# remove all the files in the intermediate contigs directory
filelist = [f for f in os.listdir(target_dir) if f.endswith(tuple(_OUT_EXT))]
for f in filelist:
f_path = os.path.join(target_dir, f)
os.remove(f_path)
def setUp(self):
""" create temporary files and directories to be used in the unit tests """
self.setup_path()
# create a sample directory to use for input and output
if not os.path.exists(self.unittest_dir):
os.makedirs(self.unittest_dir)
print("created directory: {0}".format(self.unittest_dir))
else:
print("There exists conflicting directory named: {0}".format(self.unittest_dir))
temp_dir_list = [self.input_dir, self.output_dir, self.output_index_dir]
for temp_dir in temp_dir_list:
# create the appropriate directories
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
print("created directory: {0}".format(temp_dir))
else:
print("There exists conflicting directory named: {0}".format(temp_dir))
input_test_files = [self.sample_fq_1, self.sample_fq_2, self.sample_reference_fa]
for test_file in input_test_files:
if not os.path.isfile(test_file):
raise ValueError( "Input file {0} does not exist. Please check metaquast/test_Data directory for sample test files".format(test_file))
def tearDown(self):
"""delete temporary files and directories generated by setUp method and megahit subprocess calls"""
if os.path.exists(self.unittest_dir):
if os.path.exists(self.input_dir):
self.clear_dir(self.input_dir)
os.rmdir(self.input_dir)
print("removed directory: {0}".format(self.input_dir))
if os.path.exists(self.output_dir):
if os.path.exists(self.output_sam):
os.remove(self.output_sam)
expected_sub_dir_list = [self.output_index_dir]
for sub_dir in expected_sub_dir_list:
if os.path.exists(sub_dir):
shutil.rmtree(sub_dir)
print("removed directory: {0}".format(sub_dir))
os.rmdir(self.output_dir)
print("removed directory: {0}".format(self.output_dir))
# remove the unittest directory
os.rmdir(self.unittest_dir)
print("removed directory: {0}".format(self.unittest_dir))
else:
print("The unittest directory {0} does not exist".format(self.unittest_dir))
# region form_bowtie_build_cmd_list tests
def test_form_bowtie_build_cmd_list_no_args(self):
"""test that the form_bowtie_build_cmd_list correctly raises a Value Error when invalid empty string is used in
place of required input"""
# arguments to be formatted
null_bowtie_build_path = ''
null_input_contigs_fa = ''
null_output_index = ''
with self.assertRaises(ValueError):
exec_bowtie.form_bowtie_build_cmd_list(null_bowtie_build_path, null_input_contigs_fa, null_output_index)
def test_form_bowtie_build_cmd_list_invalid_num_args(self):
"""test that form_bowtie_build_cmd_list correctly raises a Type Error when the wrong number of input arguments
is used"""
with self.assertRaises(TypeError):
exec_bowtie.form_bowtie_build_cmd_list(self.bowtie_path)
def test_form_bowtie_build_cmd_list(self):
"""test shall check that from_bowtie_build_cmd_list correctly generates bowtie command list when passed valid
arguments for the bowtie-build file path, input contigs fasta file, and output index directory"""
cmd_bowtie_build_list = ['/usr/local/bowtie2-2.2.9/bowtie2-build',
'/usr/local/bowtie2-2.2.9/example/reference/lambda_virus.fa',
'bowtie_unittest_temp_dir/output/index/lambda_virus']
self.assertEqual(cmd_bowtie_build_list, exec_bowtie.form_bowtie_build_cmd_list(self.bowtie_build_path,
self.sample_reference_fa,
self.output_scaffold_index))
# endregion
# region form_bowtie_cmd_list tests
def test_form_bowtie_cmd_list_no_args(self):
"""test that the form_bowtie_cmd_list correctly raises a Value Error when invalid empty string is used in place
of required input"""
# arguments to be formatted
null_bowtie_path = ''
null_index_path = ''
null_pe1_fastq = []
null_pe2_fastq = []
null_u_fastq = []
null_output_sam_path = ''
with self.assertRaises(ValueError):
exec_bowtie.form_bowtie_cmd_list(null_bowtie_path, null_index_path, null_pe1_fastq, null_pe2_fastq,
null_u_fastq, null_output_sam_path)
def test_form_bowtie_cmd_list_invalid_num_args(self):
"""test that form_bowtie_cmd_list correctly raises a Type Error when the wrong number of
input arguments is used"""
with self.assertRaises(TypeError):
exec_bowtie.form_bowtie_cmd_list(self.bowtie_path)
def test_form_bowtie_cmd_list(self):
"""test that form_bowtie_cmd_list correctly generates bowtie command list when passed valid arguments for
the bowtie file path, input index base name, input forward, reverse, and unpaired fastq files, and the
path to the output sam file"""
cmd_bowtie_list = ['/usr/local/bowtie2-2.2.9/bowtie2',
'-x', '/usr/local/bowtie2-2.2.9/example/index/lambda_virus',
'-1', '/usr/local/bowtie2-2.2.9/example/reads/reads_1.fq',
'-2', '/usr/local/bowtie2-2.2.9/example/reads/reads_2.fq',
'-S', 'bowtie_unittest_temp_dir/output/lambda_virus_sample.sam']
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
self.assertEqual(cmd_bowtie_list, exec_bowtie.form_bowtie_cmd_list(self.bowtie_path, self.scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u,
self.output_sam))
# endregion
# region run_bowtie_build_cmd_list
def test_run_bowtie_build_no_args(self):
"""test that run_bowtie correctly raises a Value Error when invalid empty string is used in place of
required input"""
# arguments to be formatted
null_bowtie_build_path = ''
null_input_contigs_fa = ''
null_output_index = ''
with self.assertRaises(ValueError):
exec_bowtie.run_bowtie_build(null_bowtie_build_path, null_input_contigs_fa, null_output_index)
def test_run_bowtie_build_invalid_num_args(self):
"""test that run_bowtie_build correctly raises a Type Error when the wrong number of input arguments
is used"""
with self.assertRaises(TypeError):
exec_bowtie.run_bowtie_build(self.bowtie_path)
def test_run_bowtie_build_with_existing_output_index(self):
"""test that run_bowtie_build correctly raises an OSError when the specified output index base name exists"""
_BOTIE_INDEX_FILE_EXT = '.bt2'
_SAMPLE_BOWTIE_STR = 'unittest test_run_bowtie_build_with_existing_output_index'
if not os.path.exists(self.output_index_dir):
os.mkdir(self.output_index_dir)
for x in range(1,7):
sample_index_str = self.output_scaffold_index + '.' + str(x) + _BOTIE_INDEX_FILE_EXT
sample_index_file = open(sample_index_str, 'w+')
sample_index_file.write(_SAMPLE_BOWTIE_STR)
sample_index_file.close()
with self.assertRaises(OSError):
exec_bowtie.run_bowtie_build(self.bowtie_build_path, self.sample_reference_fa, self.output_scaffold_index)
def test_run_bowtie_build_good_stderr(self):
"""test that bowtie2-build subprocess call does not report an execution Error when run_bowtie is passed valid
arguments for the bowtie path, input contigs, and output index base name"""
_BOWTIE_EXECUTION_ERROR = 'Error:'
stdout, stderr = exec_bowtie.run_bowtie_build(self.bowtie_build_path, self.sample_reference_fa,
self.output_scaffold_index)
self.assertEqual(stderr.find(_BOWTIE_EXECUTION_ERROR), -1)
def test_run_bowtie_build_index_exists(self):
"""test that bowtie2-build subprocess call correctly generates nonempty index directory when run_bowtie is
passed valid arguments for the bowtie path, input contigs, and output index base name"""
index_file_count = 0
output, err = exec_bowtie.run_bowtie_build(self.bowtie_build_path,
self.sample_reference_fa, self.output_scaffold_index)
        if os.stat(self.output_index_dir).st_size > 0:
for f in os.listdir(self.output_index_dir):
index_file_count += 1
self.assertTrue(index_file_count > 0)
# endregion
# region run_bowtie tests
def test_run_bowtie_no_args(self):
"""test that the form_bowtie_cmd_list correctly raises a Value Error when invalid empty string is used in place
of required input"""
# arguments to be formatted
null_bowtie_path = ''
null_index_path = ''
null_pe1_fastq = []
null_pe2_fastq = []
null_u_fastq = []
null_output_sam_path = ''
with self.assertRaises(ValueError):
exec_bowtie.run_bowtie(null_bowtie_path, null_index_path, null_pe1_fastq, null_pe2_fastq,
null_u_fastq, null_output_sam_path)
def test_run_bowtie_invalid_num_args(self):
"""test that form_bowtie_cmd_list correctly raises a Type Error when the wrong number of
input arguments is used"""
with self.assertRaises(TypeError):
exec_bowtie.run_bowtie(self.bowtie_path)
def test_run_bowtie_cmd_good_stderr(self):
"""test that bowtie2 subprocess call does not report execution errors when run_bowtie is passed valid
arguments for the bowtie file path, input index base name, input forward, reverse, and unpaired fastq files, and
the path to the output sam file"""
_BOWTIE_EXECUTION_ERROR = 'Error:'
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
stdout, stderr = exec_bowtie.run_bowtie(self.bowtie_path, self.scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
self.assertEqual(stderr.find(_BOWTIE_EXECUTION_ERROR), -1)
def test_run_bowtie_cmd_list_output_sam_exists(self):
"""test that bowtie2 subprocess call generates the expected output sam file when run_bowtie is passed
valid arguments for the bowtie file path, input index base name, path to the output sam file, and
input forward, reverse, and unpaired fastq files"""
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
exec_bowtie.run_bowtie(self.bowtie_path, self.scaffold_index, sample_fq_1,
sample_fq_2, sample_fq_u, self.output_sam)
        self.assertTrue(os.stat(self.output_sam).st_size > 0)
# endregion
# region build_run_bowtie tests
def test_build_run_bowtie_no_args(self):
"""test that build_run_bowtie correctly raises a Value Error when invalid empty arguments are passed instead of
expected bowtie file path, reference contigs, output index base name, unpaired and paired end fastq, and an
output sam file path"""
# arguments to be formatted
null_bowtie_path = ''
null_input_contigs_fa = ''
null_index_path = ''
null_pe1_fastq = []
null_pe2_fastq = []
null_u_fastq = []
null_output_sam_path = ''
with self.assertRaises(ValueError):
exec_bowtie.build_run_bowtie(null_bowtie_path, null_input_contigs_fa, null_index_path, null_pe1_fastq,
null_pe2_fastq, null_u_fastq, null_output_sam_path)
def test_build_run_bowtie_invalid_num_args(self):
"""test that build_run_bowtie correctly raises a Type Error when the wrong number of arguments are passed"""
with self.assertRaises(TypeError):
exec_bowtie.build_run_bowtie(self.bowtie_path)
def test_build_run_bowtie_with_existing_output_index(self):
"""test that build_run_bowtie_exsiting_output_index correctly raises an OSError when
the specified output index base name exists"""
_BOTIE_INDEX_FILE_EXT = '.bt2'
_SAMPLE_BOWTIE_STR = 'unittest test_build_run_bowtie_with_existing_output_index'
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
if not os.path.exists(self.output_index_dir):
os.mkdir(self.output_index_dir)
for x in range(1,7):
sample_index_str = self.output_scaffold_index + '.' + str(x) + _BOTIE_INDEX_FILE_EXT
sample_index_file = open(sample_index_str, 'w+')
sample_index_file.write(_SAMPLE_BOWTIE_STR)
sample_index_file.close()
with self.assertRaises(OSError):
exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa, self.output_scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
def test_build_run_bowtie_cmd_good_stderr(self):
"""test that bowtie2-build and bowtie2 suprocess call do not report execution errors when build_run_bowtie is
passed valid arguments for the bowtie file path, input index base name, output to sam path,
input forward, reverse, and unpaired fastq files"""
_BOWTIE_EXECUTION_ERROR = 'Error:'
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
buildout, builderr, stdout, stderr = exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa,
self.output_scaffold_index, sample_fq_1, sample_fq_2, sample_fq_u,
self.output_sam)
        self.assertTrue(builderr.find(_BOWTIE_EXECUTION_ERROR) == -1 and
                        stderr.find(_BOWTIE_EXECUTION_ERROR) == -1)
def test_build_run_bowtie_index_exists(self):
"""test that bowtie2-build subprocess call generates nonempty output index directory when build_run_bowtie is
passed valid arguments for the bowtie file path, input index base name, output to sam path,
        input forward, reverse, and unpaired fastq files"""
index_file_count = 0
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa, self.output_scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
        if os.stat(self.output_index_dir).st_size > 0:
for f in os.listdir(self.output_index_dir):
index_file_count += 1
self.assertTrue(index_file_count > 0)
def test_build_run_bowtie_output_sam_exists(self):
"""test that bowtie2-build subprocess call generates the expected output sam file when build_run_bowtie is
passed valid arguments for the bowtie file path, input index base name, output to sam path,
        input forward, reverse, and unpaired fastq files"""
sample_fq_1 = [self.sample_fq_1]
sample_fq_2 = [self.sample_fq_2]
sample_fq_u = []
exec_bowtie.build_run_bowtie(self.bowtie_path, self.sample_reference_fa, self.output_scaffold_index,
sample_fq_1, sample_fq_2, sample_fq_u, self.output_sam)
        self.assertTrue(os.stat(self.output_sam).st_size > 0)
# endregion
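# Not part of the original module: standard unittest entry point so the suite
# can be run directly (it still assumes the bowtie2 install location given by
# _BOWTIE_PATH above).
if __name__ == '__main__':
    unittest.main()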
|
mit
|
markrawlingson/SickRage
|
tornado/test/concurrent_test.py
|
122
|
13390
|
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
import logging
import re
import socket
import sys
import traceback
from tornado.concurrent import Future, return_future, ReturnValueIgnoredError, run_on_executor
from tornado.escape import utf8, to_unicode
from tornado import gen
from tornado.iostream import IOStream
from tornado import stack_context
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, LogTrapTestCase, bind_unused_port, gen_test
from tornado.test.util import unittest
try:
from concurrent import futures
except ImportError:
futures = None
class ReturnFutureTest(AsyncTestCase):
@return_future
def sync_future(self, callback):
callback(42)
@return_future
def async_future(self, callback):
self.io_loop.add_callback(callback, 42)
@return_future
def immediate_failure(self, callback):
1 / 0
@return_future
def delayed_failure(self, callback):
self.io_loop.add_callback(lambda: 1 / 0)
@return_future
def return_value(self, callback):
# Note that the result of both running the callback and returning
# a value (or raising an exception) is unspecified; with current
# implementations the last event prior to callback resolution wins.
return 42
@return_future
def no_result_future(self, callback):
callback()
def test_immediate_failure(self):
with self.assertRaises(ZeroDivisionError):
# The caller sees the error just like a normal function.
self.immediate_failure(callback=self.stop)
# The callback is not run because the function failed synchronously.
self.io_loop.add_timeout(self.io_loop.time() + 0.05, self.stop)
result = self.wait()
self.assertIs(result, None)
def test_return_value(self):
with self.assertRaises(ReturnValueIgnoredError):
self.return_value(callback=self.stop)
def test_callback_kw(self):
future = self.sync_future(callback=self.stop)
result = self.wait()
self.assertEqual(result, 42)
self.assertEqual(future.result(), 42)
def test_callback_positional(self):
# When the callback is passed in positionally, future_wrap shouldn't
# add another callback in the kwargs.
future = self.sync_future(self.stop)
result = self.wait()
self.assertEqual(result, 42)
self.assertEqual(future.result(), 42)
def test_no_callback(self):
future = self.sync_future()
self.assertEqual(future.result(), 42)
def test_none_callback_kw(self):
# explicitly pass None as callback
future = self.sync_future(callback=None)
self.assertEqual(future.result(), 42)
def test_none_callback_pos(self):
future = self.sync_future(None)
self.assertEqual(future.result(), 42)
def test_async_future(self):
future = self.async_future()
self.assertFalse(future.done())
self.io_loop.add_future(future, self.stop)
future2 = self.wait()
self.assertIs(future, future2)
self.assertEqual(future.result(), 42)
@gen_test
def test_async_future_gen(self):
result = yield self.async_future()
self.assertEqual(result, 42)
def test_delayed_failure(self):
future = self.delayed_failure()
self.io_loop.add_future(future, self.stop)
future2 = self.wait()
self.assertIs(future, future2)
with self.assertRaises(ZeroDivisionError):
future.result()
def test_kw_only_callback(self):
@return_future
def f(**kwargs):
kwargs['callback'](42)
future = f()
self.assertEqual(future.result(), 42)
def test_error_in_callback(self):
self.sync_future(callback=lambda future: 1 / 0)
# The exception gets caught by our StackContext and will be re-raised
# when we wait.
self.assertRaises(ZeroDivisionError, self.wait)
def test_no_result_future(self):
future = self.no_result_future(self.stop)
result = self.wait()
self.assertIs(result, None)
# result of this future is undefined, but not an error
future.result()
def test_no_result_future_callback(self):
future = self.no_result_future(callback=lambda: self.stop())
result = self.wait()
self.assertIs(result, None)
future.result()
@gen_test
def test_future_traceback(self):
@return_future
@gen.engine
def f(callback):
yield gen.Task(self.io_loop.add_callback)
try:
1 / 0
except ZeroDivisionError:
self.expected_frame = traceback.extract_tb(
sys.exc_info()[2], limit=1)[0]
raise
try:
yield f()
self.fail("didn't get expected exception")
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
self.assertIn(self.expected_frame, tb)
# The following series of classes demonstrate and test various styles
# of use, with and without generators and futures.
class CapServer(TCPServer):
def handle_stream(self, stream, address):
logging.info("handle_stream")
self.stream = stream
self.stream.read_until(b"\n", self.handle_read)
def handle_read(self, data):
logging.info("handle_read")
data = to_unicode(data)
if data == data.upper():
self.stream.write(b"error\talready capitalized\n")
else:
# data already has \n
self.stream.write(utf8("ok\t%s" % data.upper()))
self.stream.close()
class CapError(Exception):
pass
class BaseCapClient(object):
def __init__(self, port, io_loop):
self.port = port
self.io_loop = io_loop
def process_response(self, data):
status, message = re.match('(.*)\t(.*)\n', to_unicode(data)).groups()
if status == 'ok':
return message
else:
raise CapError(message)
class ManualCapClient(BaseCapClient):
def capitalize(self, request_data, callback=None):
logging.info("capitalize")
self.request_data = request_data
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('127.0.0.1', self.port),
callback=self.handle_connect)
self.future = Future()
if callback is not None:
self.future.add_done_callback(
stack_context.wrap(lambda future: callback(future.result())))
return self.future
def handle_connect(self):
logging.info("handle_connect")
self.stream.write(utf8(self.request_data + "\n"))
self.stream.read_until(b'\n', callback=self.handle_read)
def handle_read(self, data):
logging.info("handle_read")
self.stream.close()
try:
self.future.set_result(self.process_response(data))
except CapError as e:
self.future.set_exception(e)
class DecoratorCapClient(BaseCapClient):
@return_future
def capitalize(self, request_data, callback):
logging.info("capitalize")
self.request_data = request_data
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('127.0.0.1', self.port),
callback=self.handle_connect)
self.callback = callback
def handle_connect(self):
logging.info("handle_connect")
self.stream.write(utf8(self.request_data + "\n"))
self.stream.read_until(b'\n', callback=self.handle_read)
def handle_read(self, data):
logging.info("handle_read")
self.stream.close()
self.callback(self.process_response(data))
class GeneratorCapClient(BaseCapClient):
@return_future
@gen.engine
def capitalize(self, request_data, callback):
logging.info('capitalize')
stream = IOStream(socket.socket(), io_loop=self.io_loop)
logging.info('connecting')
yield gen.Task(stream.connect, ('127.0.0.1', self.port))
stream.write(utf8(request_data + '\n'))
logging.info('reading')
data = yield gen.Task(stream.read_until, b'\n')
logging.info('returning')
stream.close()
callback(self.process_response(data))
class ClientTestMixin(object):
def setUp(self):
super(ClientTestMixin, self).setUp()
self.server = CapServer(io_loop=self.io_loop)
sock, port = bind_unused_port()
self.server.add_sockets([sock])
self.client = self.client_class(io_loop=self.io_loop, port=port)
def tearDown(self):
self.server.stop()
super(ClientTestMixin, self).tearDown()
def test_callback(self):
self.client.capitalize("hello", callback=self.stop)
result = self.wait()
self.assertEqual(result, "HELLO")
def test_callback_error(self):
self.client.capitalize("HELLO", callback=self.stop)
self.assertRaisesRegexp(CapError, "already capitalized", self.wait)
def test_future(self):
future = self.client.capitalize("hello")
self.io_loop.add_future(future, self.stop)
self.wait()
self.assertEqual(future.result(), "HELLO")
def test_future_error(self):
future = self.client.capitalize("HELLO")
self.io_loop.add_future(future, self.stop)
self.wait()
self.assertRaisesRegexp(CapError, "already capitalized", future.result)
def test_generator(self):
@gen.engine
def f():
result = yield self.client.capitalize("hello")
self.assertEqual(result, "HELLO")
self.stop()
f()
self.wait()
def test_generator_error(self):
@gen.engine
def f():
with self.assertRaisesRegexp(CapError, "already capitalized"):
yield self.client.capitalize("HELLO")
self.stop()
f()
self.wait()
class ManualClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
client_class = ManualCapClient
class DecoratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
client_class = DecoratorCapClient
class GeneratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
client_class = GeneratorCapClient
@unittest.skipIf(futures is None, "concurrent.futures module not present")
class RunOnExecutorTest(AsyncTestCase):
@gen_test
def test_no_calling(self):
class Object(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_no_args(self):
class Object(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor()
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_io_loop(self):
class Object(object):
def __init__(self, io_loop):
self._io_loop = io_loop
self.executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor(io_loop='_io_loop')
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_executor(self):
class Object(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.__executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor(executor='_Object__executor')
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_both(self):
class Object(object):
def __init__(self, io_loop):
self._io_loop = io_loop
self.__executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor(io_loop='_io_loop', executor='_Object__executor')
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
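# Illustrative sketch, not part of the original module: a minimal
# @return_future function used outside the test harness. The helper name is
# made up; because it invokes its callback synchronously, the returned Future
# is already resolved when result() is called (compare test_no_callback above).
@return_future
def _demo_double(x, callback=None):
    callback(x * 2)
if __name__ == '__main__':
    assert _demo_double(21).result() == 42
    unittest.main()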
|
gpl-3.0
|
odubno/microblog
|
venv/lib/python2.7/site-packages/sqlalchemy/orm/relationships.py
|
21
|
111121
|
# orm/relationships.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`.relationship`.
"""
from __future__ import absolute_import
from .. import sql, util, exc as sa_exc, schema, log
from .util import CascadeOptions, _orm_annotate, _orm_deannotate
from . import dependency
from . import attributes
from ..sql.util import (
ClauseAdapter,
join_condition, _shallow_annotate, visit_binary_product,
_deep_deannotate, selectables_overlap
)
from ..sql import operators, expression, visitors
from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY,
StrategizedProperty, PropComparator)
from ..inspection import inspect
from . import mapper as mapperlib
import collections
def remote(expr):
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"remote": True})
def foreign(expr):
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. versionadded:: 0.8
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns(expression._clause_element_as_expr(expr),
{"foreign": True})
@log.class_logger
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
See also:
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = 'relationship'
_dependency_processor = None
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False, extension=None,
viewonly=False, lazy=True,
collection_class=None, passive_deletes=False,
passive_updates=True, remote_side=None,
enable_typechecks=True, join_depth=None,
comparator_factory=None,
single_parent=False, innerjoin=False,
distinct_target_key=None,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
strategy_class=None, _local_remote_pairs=None,
query_class=None,
info=None):
"""Provide a relationship between two mapped classes.
This corresponds to a parent-child or associative table relationship.
The constructed class is an instance of
:class:`.RelationshipProperty`.
A typical :func:`.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`.relationship` optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`.Mapper` at "mapper
initialization" time, which happens only when mappers are first used,
and is assumed to be after all mappings have been constructed. This
can be used to resolve order-of-declaration and other dependency
issues, such as if ``Child`` is declared below ``Parent`` in the same
file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to
:func:`.relationship`. These string arguments are converted into
callables that evaluate the string as Python code, using the
Declarative class-registry as a namespace. This allows the lookup of
related classes to be automatic via their string name, and removes the
need to import related classes at all into the local module space::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
.. seealso::
:ref:`relationship_config_toplevel` - Full introductory and
reference documentation for :func:`.relationship`.
:ref:`orm_tutorial_relationship` - ORM tutorial introduction.
:param argument:
a mapped class, or actual :class:`.Mapper` instance, representing
the target of the relationship.
:paramref:`~.relationship.argument` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
.. seealso::
:ref:`declarative_configuring_relationships` - further detail
on relationship configuration when using Declarative.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table, and is typically an instance of :class:`.Table`.
In less common circumstances, the argument may also be specified
as an :class:`.Alias` construct, or even a :class:`.Join` construct.
:paramref:`~.relationship.secondary` may
also be passed as a callable function which is evaluated at
mapper initialization time. When using Declarative, it may also
be a string argument noting the name of a :class:`.Table` that is
present in the :class:`.MetaData` collection associated with the
parent-mapped :class:`.Table`.
The :paramref:`~.relationship.secondary` keyword argument is
typically applied in the case where the intermediary :class:`.Table`
is not otherwise expressed in any direct class mapping. If the
"secondary" table is also explicitly mapped elsewhere (e.g. as in
:ref:`association_pattern`), one should consider applying the
:paramref:`~.relationship.viewonly` flag so that this
:func:`.relationship` is not used for persistence operations which
may conflict with those of the association object pattern.
.. seealso::
:ref:`relationships_many_to_many` - Reference example of "many
to many".
:ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to
many-to-many relationships.
:ref:`self_referential_many_to_many` - Specifics on using
many-to-many in a self-referential case.
:ref:`declarative_many_to_many` - Additional options when using
Declarative.
:ref:`association_pattern` - an alternative to
:paramref:`~.relationship.secondary` when composing association
table relationships, allowing additional attributes to be
specified on the association table.
:ref:`composite_secondary_join` - a lesser-used pattern which
in some cases can enable complex :func:`.relationship` SQL
conditions to be used.
.. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works
more effectively when referring to a :class:`.Join` instance.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`.backref` object to control the configuration of the
new relationship.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.back_populates` - alternative form
of backref specification.
:func:`.backref` - allows control over :func:`.relationship`
configuration when using :paramref:`~.relationship.backref`.
:param back_populates:
Takes a string name and has the same meaning as
:paramref:`~.relationship.backref`, except the complementing
property is **not** created automatically, and instead must be
configured explicitly on the other mapper. The complementing
property should also indicate
:paramref:`~.relationship.back_populates` to this relationship to
ensure proper functioning.
.. seealso::
:ref:`relationships_backref` - Introductory documentation and
examples.
:paramref:`~.relationship.backref` - alternative form
of backref specification.
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used - this default cascade is ``"save-update, merge"``.
The available cascades are ``save-update``, ``merge``,
``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``.
An additional option, ``all`` indicates shorthand for
``"save-update, merge, refresh-expire,
expunge, delete"``, and is often used as in ``"all, delete-orphan"``
to indicate that related objects should follow along with the
parent object in all cases, and be deleted when de-associated.
.. seealso::
:ref:`unitofwork_cascades` - Full detail on each of the available
cascade options.
:ref:`tutorial_delete_cascade` - Tutorial example describing
a delete cascade.
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``, the attribute managed by this relationship
will not cascade an incoming transient object into the session of a
persistent parent, if the event is received via backref.
.. seealso::
:ref:`backref_cascade` - Full discussion and examples on how
the :paramref:`~.relationship.cascade_backrefs` option is used.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
.. seealso::
:ref:`custom_collections` - Introductory documentation and
examples.
:param comparator_factory:
a class which extends :class:`.RelationshipProperty.Comparator`
which provides custom SQL clause generation for comparison
operations.
.. seealso::
:class:`.PropComparator` - some detail on redefining comparators
at this level.
:ref:`custom_comparators` - Brief intro to this feature.
:param distinct_target_key=None:
Indicate if a "subquery" eager load should apply the DISTINCT
keyword to the innermost SELECT statement. When left as ``None``,
the DISTINCT keyword will be applied in those cases when the target
columns do not comprise the full primary key of the target table.
When set to ``True``, the DISTINCT keyword is applied to the
innermost SELECT unconditionally.
It may be desirable to set this flag to False when the DISTINCT
reduces performance of the innermost subquery more than the
duplicate innermost rows it is meant to eliminate.
.. versionadded:: 0.8.3 -
:paramref:`~.relationship.distinct_target_key` allows the
subquery eager loader to apply a DISTINCT modifier to the
innermost SELECT.
.. versionchanged:: 0.9.0 -
:paramref:`~.relationship.distinct_target_key` now defaults to
``None``, so that the feature enables itself automatically for
those cases where the innermost query targets a non-unique
key.
.. seealso::
:ref:`loading_toplevel` - includes an introduction to subquery
eager loading.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
.. deprecated:: 0.7 Please see :class:`.AttributeEvents`.
:param foreign_keys:
a list of columns which are to be used as "foreign key"
columns, or columns which refer to the value in a remote
column, within the context of this :func:`.relationship`
object's :paramref:`~.relationship.primaryjoin` condition.
That is, if the :paramref:`~.relationship.primaryjoin`
condition of this :func:`.relationship` is ``a.id ==
b.a_id``, and the values in ``b.a_id`` are required to be
present in ``a.id``, then the "foreign key" column of this
:func:`.relationship` is ``b.a_id``.
In normal cases, the :paramref:`~.relationship.foreign_keys`
parameter is **not required.** :func:`.relationship` will
automatically determine which columns in the
:paramref:`~.relationship.primaryjoin` condition are to be
considered "foreign key" columns based on those
:class:`.Column` objects that specify :class:`.ForeignKey`,
or are otherwise listed as referencing columns in a
:class:`.ForeignKeyConstraint` construct.
:paramref:`~.relationship.foreign_keys` is only needed when:
1. There is more than one way to construct a join from the local
table to the remote table, as there are multiple foreign key
references present. Setting ``foreign_keys`` will limit the
:func:`.relationship` to consider just those columns specified
here as "foreign".
.. versionchanged:: 0.8
A multiple-foreign key join ambiguity can be resolved by
setting the :paramref:`~.relationship.foreign_keys`
parameter alone, without the need to explicitly set
:paramref:`~.relationship.primaryjoin` as well.
2. The :class:`.Table` being mapped does not actually have
:class:`.ForeignKey` or :class:`.ForeignKeyConstraint`
constructs present, often because the table
was reflected from a database that does not support foreign key
reflection (MySQL MyISAM).
3. The :paramref:`~.relationship.primaryjoin` argument is used to
construct a non-standard join condition, which makes use of
columns or expressions that do not normally refer to their
"parent" column, such as a join condition expressed by a
complex comparison using a SQL function.
The :func:`.relationship` construct will raise informative
error messages that suggest the use of the
:paramref:`~.relationship.foreign_keys` parameter when
presented with an ambiguous condition. In typical cases,
if :func:`.relationship` doesn't raise any exceptions, the
:paramref:`~.relationship.foreign_keys` parameter is usually
not needed.
:paramref:`~.relationship.foreign_keys` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_foreign_keys`
:ref:`relationship_custom_foreign`
:func:`.foreign` - allows direct annotation of the "foreign"
columns within a :paramref:`~.relationship.primaryjoin` condition.
.. versionadded:: 0.8
The :func:`.foreign` annotation can also be applied
directly to the :paramref:`~.relationship.primaryjoin`
expression, which is an alternate, more specific system of
describing which columns in a particular
:paramref:`~.relationship.primaryjoin` should be considered
"foreign".
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not
nullable, or when the reference is one-to-one or a collection that
is guaranteed to have at least one entry.
If the joined-eager load is chained onto an existing LEFT OUTER
JOIN, ``innerjoin=True`` will be bypassed and the join will continue
to chain as LEFT OUTER JOIN so that the results don't change. As an
alternative, specify the value ``"nested"``. This will instead nest
the join on the right side, e.g. using the form "a LEFT OUTER JOIN
(b JOIN c)".
.. versionadded:: 0.9.4 Added ``innerjoin="nested"`` option to
support nesting of eager "inner" joins.
.. seealso::
:ref:`what_kind_of_loading` - Discussion of some details of
various loader options.
:paramref:`.joinedload.innerjoin` - loader option version
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
.. seealso::
:ref:`self_referential_eager_loading` - Introductory documentation
and examples.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is
first accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the
:paramref:`~.relationship.innerjoin` parameter.
* ``subquery`` - items should be loaded "eagerly" as the parents are
loaded, using one additional SQL statement, which issues a JOIN to
a subquery of the original statement, for each collection
requested.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``dynamic`` - the attribute will return a pre-configured
:class:`.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
.. seealso::
:doc:`/orm/loading_relationships` - Full documentation on relationship loader
configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it
has never been flushed. This may take effect for a pending object
when autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection.
The :paramref:`~.relationship.load_on_pending` flag does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before a flush proceeds.
This flag is not intended for general use.
.. seealso::
:meth:`.Session.enable_relationship_loading` - this method
establishes "load on pending" behavior for the whole object, and
also allows loading on objects that remain transient or
detached.
:param order_by:
indicates the ordering that should be applied when loading these
items. :paramref:`~.relationship.order_by` is expected to refer to
one of the :class:`.Column` objects to which the target class is
mapped, or the attribute itself bound to the target class which
refers to the column.
:paramref:`~.relationship.order_by` may also be passed as a callable
function which is evaluated at mapper initialization time, and may
be passed as a Python-evaluable string when using Declarative.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when there
is no delete or delete-orphan cascade enabled. This is
typically used when a triggering or error raise scenario is in
place on the database side. Note that the foreign key
attributes on in-session child objects will not be changed
after a flush occurs so this is a very special use-case
setting.
.. seealso::
:ref:`passive_deletes` - Introductory documentation
and examples.
:param passive_updates=True:
Indicates loading and INSERT/UPDATE/DELETE behavior when the
source of a foreign key value changes (i.e. an "on update"
cascade), which are typically the primary key columns of the
source row.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. Note that with databases which enforce
referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
ON UPDATE CASCADE is required for this operation. The
relationship() will update the value of the attribute on related
items which are locally present in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE
(i.e. SQLite, MySQL MyISAM tables).
.. seealso::
:ref:`passive_updates` - Introductory documentation and
examples.
:paramref:`.mapper.passive_updates` - a similar flag which
takes effect for joined-table inheritance mappings.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also issues an UPDATE after the
instance itself has been UPDATEd, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a flush operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use :paramref:`~.relationship.post_update` to "break" the cycle.
.. seealso::
:ref:`post_update` - Introductory documentation and examples.
:param primaryjoin:
a SQL expression that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or
association table).
:paramref:`~.relationship.primaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
:paramref:`.relationship.remote_side` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. versionchanged:: 0.8
The :func:`.remote` annotation can also be applied
directly to the ``primaryjoin`` expression, which is an
alternate, more specific system of describing which columns in a
particular ``primaryjoin`` should be considered "remote".
.. seealso::
:ref:`self_referential` - in-depth explanation of how
:paramref:`~.relationship.remote_side`
is used to configure self-referential relationships.
:func:`.remote` - an annotation function that accomplishes the
same purpose as :paramref:`~.relationship.remote_side`, typically
when a custom :paramref:`~.relationship.primaryjoin` condition
is used.
:param query_class:
a :class:`.Query` subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`.orm.dynamic_loader`
function.
.. seealso::
:ref:`dynamic_relationship` - Introduction to "dynamic"
relationship loaders.
:param secondaryjoin:
a SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association
and child tables.
:paramref:`~.relationship.secondaryjoin` may also be passed as a
callable function which is evaluated at mapper initialization time,
and may be passed as a Python-evaluable string when using
Declarative.
.. seealso::
:ref:`relationship_primaryjoin`
:param single_parent:
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its usage
is optional, except for :func:`.relationship` constructs which
are many-to-one or many-to-many and also
specify the ``delete-orphan`` cascade option. The
:func:`.relationship` construct itself will raise an error
instructing when this option is required.
.. seealso::
:ref:`unitofwork_cascades` - includes detail on when the
:paramref:`~.relationship.single_parent` flag may be appropriate.
:param uselist:
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by :func:`.relationship` at mapper configuration
time, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set :paramref:`~.relationship.uselist` to
False.
The :paramref:`~.relationship.uselist` flag is also available on an
existing :func:`.relationship` construct as a read-only attribute,
which can be used to determine if this :func:`.relationship` deals
with collections or scalar attributes::
>>> User.addresses.property.uselist
True
.. seealso::
:ref:`relationships_one_to_one` - Introduction to the "one to
one" relationship pattern, which is typically when the
:paramref:`~.relationship.uselist` flag is needed.
:param viewonly=False:
when set to True, the relationship is used only for loading objects,
and not for any persistence operation. A :func:`.relationship`
which specifies :paramref:`~.relationship.viewonly` can work
with a wider range of SQL operations within the
:paramref:`~.relationship.primaryjoin` condition, including
operations that feature the use of a variety of comparison operators
as well as SQL functions such as :func:`~.sql.expression.cast`. The
:paramref:`~.relationship.viewonly` flag is also of general use when
defining any kind of :func:`~.relationship` that doesn't represent
the full set of related objects, to prevent modifications of the
collection from resulting in persistence operations.
"""
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if info is not None:
self.info = info
if strategy_class:
self.strategy_class = strategy_class
else:
self.strategy_class = self._strategy_lookup(("lazy", self.lazy))
self._reverse_property = set()
self.cascade = cascade if cascade is not False \
else "save-update, merge"
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive")
self.backref = None
else:
self.backref = backref
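# An illustrative sketch (not part of this module) tying together a few of
# the parameters documented above; the Parent/Child models are hypothetical:
#
#     class Parent(Base):
#         __tablename__ = 'parent'
#         id = Column(Integer, primary_key=True)
#         children = relationship(
#             "Child",
#             back_populates="parent",
#             lazy="joined",                 # eager load via LEFT OUTER JOIN
#             cascade="all, delete-orphan",  # children follow the parent
#             order_by="Child.id",
#         )
#
#     class Child(Base):
#         __tablename__ = 'child'
#         id = Column(Integer, primary_key=True)
#         parent_id = Column(Integer, ForeignKey('parent.id'))
#         parent = relationship("Parent", back_populates="children")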
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
See also:
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
_of_type = None
def __init__(
self, prop, parentmapper, adapt_to_entity=None, of_type=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parentmapper = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(self.property, self._parentmapper,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type)
@util.memoized_property
def mapper(self):
"""The target :class:`.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`.relationship`.
"""
return self.property.mapper
@util.memoized_property
def _parententity(self):
return self.property.parent
def _source_selectable(self):
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self):
adapt_from = self._source_selectable()
if self._of_type:
of_type = inspect(self._of_type).mapper
else:
of_type = None
pj, sj, source, dest, \
secondary, target_adapter = self.property._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type=of_type)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
"""
return RelationshipProperty.Comparator(
self.property,
self._parentmapper,
adapt_to_entity=self._adapt_to_entity,
of_type=cls)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple '
'many-to-one, use in_() against '
'the set of foreign key values.')
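# The error above points at the usual workaround; a rough sketch with
# hypothetical Address/User models for a simple many-to-one:
#
#     # instead of Address.user.in_([u1, u2]) ...
#     session.query(Address).filter(
#         Address.user_id.in_([u1.id, u2.id])
#     )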
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(self.property._optimized_compare(
None, adapt_source=self.adapter))
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership.")
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter))
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, '_of_type', None):
info = inspect(self._of_type)
target_mapper, to_selectable, is_aliased_class = \
info.mapper, info.selectable, info.is_aliased_class
if self.property._is_self_referential and not \
is_aliased_class:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
pj, sj, source, dest, secondary, target_adapter = \
self.property._create_joins(
dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj its the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if criterion is not None and target_adapter and not \
is_aliased_class:
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate(
{'no_replacement_traverse': True})
crit = j & sql.True_._ifnone(criterion)
ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest)
if secondary is not None:
ex = ex.correlate_except(secondary)
return ex
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. "
"Use any().")
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use ==")
clause = self.property._optimized_compare(
other, adapt_source=self.adapter)
if self.property.secondaryjoin is not None:
clause.negation_clause = \
self.__negated_contains_or_equals(other)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
o = state.obj() # strong ref
return sql.bindparam(
x, unique=True, callable_=lambda:
self.property.mapper.
_get_committed_attr_by_column(o, col))
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(*[
sql.or_(
adapt(x) != state_bindparam(adapt(x), state, y),
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
criterion = sql.and_(*[x == y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.
mapper.
primary_key_from_instance(other))
])
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`~.expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (util.NoneType, expression.Null)):
if self.property.direction == MANYTOONE:
return _orm_annotate(~self.property._optimized_compare(
None, adapt_source=self.adapter))
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership.")
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
@util.memoized_property
def property(self):
if mapperlib.Mapper._new_mappers:
mapperlib.Mapper._configure_all()
return self.prop
def compare(self, op, value,
value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
return self._optimized_compare(
None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return self._optimized_compare(
value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
return self._lazy_strategy.lazy_clause(
value,
reverse_direction=not value_is_parent,
alias_secondary=alias_secondary,
adapt_source=adapt_source)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load, _recursive):
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
instances = source_state.get_impl(self.key).\
get(source_state, source_dict)
if hasattr(instances, '_sa_adapter'):
# convert collections to adapters to get a true iterator
instances = instances._sa_adapter
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(dest_state, dest_dict)
dest_list = []
for current in instances:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(current_state, current_dict,
load=load, _recursive=_recursive)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(dest_state,
dest_dict, self.key)
for c in dest_list:
coll.append_without_event(c)
else:
dest_state.get_impl(self.key)._set_iterable(
dest_state, dest_dict, dest_list)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(current_state, current_dict,
load=load, _recursive=_recursive)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(dest_state,
dest_dict, obj, None)
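# For orientation, the merge() above is what runs per relationship when an
# object graph is merged into a Session; roughly, with a hypothetical
# detached Parent whose cascade includes "merge" (the default):
#
#     detached_parent.children.append(Child(name="new"))
#     merged = session.merge(detached_parent)
#     # merged.children now holds persistent copies, pulled from the
#     # identity map where already present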
def _value_as_iterable(self, state, dict_, key,
passive=attributes.PASSIVE_OFF):
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is attributes.PASSIVE_NO_RESULT or x is None:
return []
elif hasattr(impl, 'get_collection'):
return [
(attributes.instance_state(o), o) for o in
impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(self, type_, state, dict_,
visited_states, halt_on=None):
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != 'delete' or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
if type_ == 'save-update':
tuples = state.manager[self.key].impl.\
get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(state, dict_, self.key,
passive=passive)
skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
not in self._cascade
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but
# would not be consistent with collection.append(None)
# current behavior of silently skipping.
# see [ticket:2229]
continue
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError("Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'" % (
self.key,
self.parent.class_,
c.__class__
))
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
other = self.mapper.get_property(key, _configure_mappers=False)
self._reverse_property.add(other)
other._reverse_property.add(self)
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
'reverse_property %r on '
'relationship %s references relationship %s, which '
'does not reference mapper %s' %
(key, self, other, self.parent))
if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
== other.direction:
raise sa_exc.ArgumentError(
'%s and back-reference %s are '
'both of the same direction %r. Did you mean to '
'set remote_side on the many-to-one side ?' %
(other, self, self.direction))
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
if util.callable(self.argument) and \
not isinstance(self.argument, (type, mapperlib.Mapper)):
argument = self.argument()
else:
argument = self.argument
if isinstance(argument, type):
mapper_ = mapperlib.class_mapper(argument,
configure=False)
elif isinstance(self.argument, mapperlib.Mapper):
mapper_ = argument
else:
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(argument)))
return mapper_
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
"""Return the selectable linked to this
:class:`.RelationshipProperty` object's target
:class:`.Mapper`.
"""
return self.target
def do_init(self):
self._check_conflicts()
self._process_dependent_arguments()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
super(RelationshipProperty, self).do_init()
self._lazy_strategy = self._get_strategy((("lazy", "select"),))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
for attr in (
'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side',
):
attr_value = getattr(self, attr)
if util.callable(attr_value):
setattr(self, attr, attr_value())
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in 'primaryjoin', 'secondaryjoin':
val = getattr(self, attr)
if val is not None:
setattr(self, attr, _orm_deannotate(
expression._only_column_elements(val, attr))
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
for x in
util.to_list(self.order_by)]
self._user_defined_foreign_keys = \
util.column_set(
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(
self._user_defined_foreign_keys
))
self.remote_side = \
util.column_set(
expression._only_column_elements(x, "remote_side")
for x in
util.to_column_set(self.remote_side))
self.target = self.mapper.mapped_table
def _setup_join_conditions(self):
self._join_condition = jc = JoinCondition(
parent_selectable=self.parent.mapped_table,
child_selectable=self.mapper.mapped_table,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.mapper.local_table,
primaryjoin=self.primaryjoin,
secondary=self.secondary,
secondaryjoin=self.secondaryjoin,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped
)
self.primaryjoin = jc.deannotated_primaryjoin
self.secondaryjoin = jc.deannotated_secondaryjoin
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if not self.is_primary() and not mapperlib.class_mapper(
self.parent.class_,
configure=False).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' " %
(self.key, self.parent.class_.__name__,
self.parent.class_.__name__))
# check for conflicting relationship() on superclass
if not self.parent.concrete:
for inheriting in self.parent.iterate_to_root():
if inheriting is not self.parent \
and inheriting.has_property(self.key):
util.warn("Warning: relationship '%s' on mapper "
"'%s' supersedes the same relationship "
"on inherited mapper '%s'; this can "
"cause dependency issues during flush"
% (self.key, self.parent, inheriting))
def _get_cascade(self):
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
def _set_cascade(self, cascade):
cascade = CascadeOptions(cascade)
if 'mapper' in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
cascade = property(_get_cascade, _set_cascade)
def _check_cascade_settings(self, cascade):
if cascade.delete_orphan and not self.single_parent \
and (self.direction is MANYTOMANY or self.direction
is MANYTOONE):
raise sa_exc.ArgumentError(
'On %s, delete-orphan cascade is not supported '
'on a many-to-many or many-to-one relationship '
'when single_parent is not set. Set '
'single_parent=True on the relationship().'
% self)
if self.direction is MANYTOONE and self.passive_deletes:
util.warn("On %s, 'passive_deletes' is normally configured "
"on one-to-many, one-to-one, many-to-many "
"relationships only."
% self)
if self.passive_deletes == 'all' and \
("delete" in cascade or
"delete-orphan" in cascade):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if self.secondary is not None \
and self.secondary.c.contains_column(c):
continue
if not self.parent.mapped_table.c.contains_column(c) and \
not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self):
"""Interpret the 'backref' instruction to create a
:func:`.relationship` complementary to this one."""
if not self.is_primary():
return
if self.backref is not None and not self.back_populates:
if isinstance(self.backref, util.string_types):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
check = set(mapper.iterate_to_root()).\
union(mapper.self_and_descendants)
for m in check:
if m.has_property(backref_key):
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'" %
(backref_key, self, m))
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
'primaryjoin',
self._join_condition.secondaryjoin_minus_local)
sj = kwargs.pop(
'secondaryjoin',
self._join_condition.primaryjoin_minus_local)
else:
pj = kwargs.pop(
'primaryjoin',
self._join_condition.primaryjoin_reverse_remote)
sj = kwargs.pop('secondaryjoin', None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop('foreign_keys',
self._user_defined_foreign_keys)
parent = self.parent.primary_mapper()
kwargs.setdefault('viewonly', self.viewonly)
kwargs.setdefault('post_update', self.post_update)
kwargs.setdefault('passive_updates', self.passive_updates)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent, self.secondary,
pj, sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs)
mapper._configure_property(backref_key, relationship)
if self.back_populates:
self._add_reverse_property(self.back_populates)
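# Sketch (hypothetical models) of the non-string backref form handled above,
# where extra keyword arguments ride along to the generated reverse
# relationship:
#
#     from sqlalchemy.orm import backref
#
#     addresses = relationship(
#         "Address",
#         backref=backref("user", lazy="joined"),
#     )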
def _post_init(self):
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = \
dependency.DependencyProcessor.from_relationship(self)
@util.memoized_property
def _use_get(self):
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self):
return self.mapper.common_parent(self.parent)
def _create_joins(self, source_polymorphic=False,
source_selectable=None, dest_polymorphic=False,
dest_selectable=None, of_type=None):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
aliased = False
if dest_selectable is None:
if dest_polymorphic and self.mapper.with_polymorphic:
dest_selectable = self.mapper._with_polymorphic_selectable
aliased = True
else:
dest_selectable = self.mapper.mapped_table
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable.alias()
aliased = True
else:
aliased = True
dest_mapper = of_type or self.mapper
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (source_selectable is not None)
primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \
self._join_condition.join_targets(
source_selectable, dest_selectable, aliased, single_crit
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.mapper.local_table
return (primaryjoin, secondaryjoin, source_selectable,
dest_selectable, secondary, target_adapter)
def _annotate_columns(element, annotations):
def clone(elem):
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
class JoinCondition(object):
def __init__(self,
parent_selectable,
child_selectable,
parent_local_selectable,
child_local_selectable,
primaryjoin=None,
secondary=None,
secondaryjoin=None,
parent_equivalents=None,
child_equivalents=None,
consider_as_foreign_keys=None,
local_remote_pairs=None,
remote_side=None,
self_referential=False,
prop=None,
support_sync=True,
can_be_synced_fn=lambda *c: True
):
self.parent_selectable = parent_selectable
self.parent_local_selectable = parent_local_selectable
self.child_selectable = child_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self):
if self.prop is None:
return
log = self.prop.logger
log.info('%s setup primary join %s', self.prop,
self.primaryjoin)
log.info('%s setup secondary join %s', self.prop,
self.secondaryjoin)
log.info('%s synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.synchronize_pairs))
log.info('%s secondary synchronize pairs [%s]', self.prop,
','.join('(%s => %s)' % (l, r) for (l, r) in
self.secondary_synchronize_pairs or []))
log.info('%s local/remote pairs [%s]', self.prop,
','.join('(%s / %s)' % (l, r) for (l, r) in
self.local_remote_pairs))
log.info('%s remote columns [%s]', self.prop,
','.join('%s' % col for col in self.remote_columns)
)
log.info('%s local columns [%s]', self.prop,
','.join('%s' % col for col in self.local_columns)
)
log.info('%s relationship direction %s', self.prop,
self.direction)
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = \
join_condition(
self.child_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
else:
if self.primaryjoin is None:
self.primaryjoin = \
join_condition(
self.parent_selectable,
self.child_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys
)
except sa_exc.NoForeignKeysError:
if self.secondary is not None:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary))
else:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop)
except sa_exc.AmbiguousForeignKeysError:
if self.secondary is not None:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables."
% (self.prop, self.secondary))
else:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table."
% self.prop)
@property
def primaryjoin_minus_local(self):
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self):
return _deep_deannotate(self.secondaryjoin,
values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self):
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element):
if "remote" in element._annotations:
v = element._annotations.copy()
del v['remote']
v['local'] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = element._annotations.copy()
del v['local']
v['remote'] = True
return element._with_annotations(v)
return visitors.replacement_traverse(
self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(self.primaryjoin,
values=("local", "remote"))
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause, annotation):
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self):
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self):
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self):
def check_fk(col):
if col in self.consider_as_foreign_keys:
return col._annotate({"foreign": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin,
{},
check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin,
{},
check_fk
)
def _annotate_present_fks(self):
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(a, b):
if isinstance(a, schema.Column) and \
isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
def visit_binary(binary):
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if "foreign" not in binary.left._annotations and \
"foreign" not in binary.right._annotations:
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate(
{"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True})
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin,
{},
{"binary": visit_binary}
)
def _refers_to_parent_table(self):
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_selectable
mt = self.child_selectable
result = [False]
def visit_binary(binary):
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause) and
isinstance(f, expression.ColumnClause) and
pt.is_derived_from(c.table) and
pt.is_derived_from(f.table) and
mt.is_derived_from(c.table) and
mt.is_derived_from(f.table)
):
result[0] = True
visitors.traverse(
self.primaryjoin,
{},
{"binary": visit_binary}
)
return result[0]
def _tables_overlap(self):
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_selectable, self.child_selectable)
def _annotate_remote(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(lambda col: "foreign" in col._annotations, False)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
def repl(element):
if self.secondary.c.contains_column(element):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl)
def _annotate_selfref(self, fn, remote_side_given):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and \
isinstance(binary.right, expression.ColumnClause):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate(
{"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_from_args(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument.")
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element):
if element in remote_side:
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _annotate_remote_with_overlap(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
tables in common, though is not a fully self-referential
relationship.
"""
def visit_binary(binary):
binary.left, binary.right = proc_left_right(binary.left,
binary.right)
binary.right, binary.left = proc_left_right(binary.right,
binary.left)
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and \
isinstance(right, expression.ColumnClause):
if self.child_selectable.c.contains_column(right) and \
self.parent_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {},
{"binary": visit_binary})
def _annotate_remote_distinct_selectables(self):
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element):
if self.child_selectable.c.contains_column(element) and \
(not self.parent_local_selectable.c.
contains_column(element) or
self.child_local_selectable.c.
contains_column(element)):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
def _warn_non_column_elements(self):
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side."
% self.prop
)
def _annotate_local(self):
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set([l for (l, r)
in self._local_remote_pairs])
else:
local_side = util.column_set(self.parent_selectable.c)
def locals_(elem):
if "remote" not in elem._annotations and \
elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
def _check_remote_side(self):
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
'Relationship %s could '
'not determine any unambiguous local/remote column '
'pairs based on join condition and remote_side '
'arguments. '
'Consider using the remote() annotation to '
'accurately mark those elements of the join '
'condition that are on the remote side of '
'the relationship.' % (self.prop, ))
def _check_foreign_cols(self, join_condition, primary):
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign")
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if self.support_sync and can_sync or \
(not self.support_sync and has_foreign):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = "Could not locate any simple equality expressions "\
"involving locally mapped foreign key columns for "\
"%s join condition "\
"'%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
" Ensure that referencing columns are associated "\
"with a ForeignKey or ForeignKeyConstraint, or are "\
"annotated in the join condition with the foreign() "\
"annotation. To allow comparison operators other than "\
"'==', the relationship can be marked as viewonly=True."
raise sa_exc.ArgumentError(err)
else:
err = "Could not locate any relevant foreign key columns "\
"for %s join condition '%s' on relationship %s." % (
primary and 'primary' or 'secondary',
join_condition,
self.prop
)
err += \
' Ensure that referencing columns are associated '\
'with a ForeignKey or ForeignKeyConstraint, or are '\
'annotated in the join condition with the foreign() '\
'annotation.'
raise sa_exc.ArgumentError(err)
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_selectable.c)
targetcols = util.column_set(self.child_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(
self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(
self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign")
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = set([c for c in
self._gather_columns_with_annotation(
self.primaryjoin,
"foreign")
if "remote" not in c._annotations])
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop)
def _deannotate_pairs(self, collection):
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
original columns mapped.
"""
return [(x._deannotate(), y._deannotate())
for x, y in collection]
def _setup_pairs(self):
sync_pairs = []
lrp = util.OrderedSet([])
secondary_sync_pairs = []
def go(joincond, collection):
def visit_binary(binary, left, right):
if "remote" in right._annotations and \
"remote" not in left._annotations and \
self.can_be_synced_fn(left):
lrp.add((left, right))
elif "remote" in left._annotations and \
"remote" not in right._annotations and \
self.can_be_synced_fn(right):
lrp.add((right, left))
if binary.operator is operators.eq and \
self.can_be_synced_fn(left, right):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs)
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = \
self._deannotate_pairs(secondary_sync_pairs)
@util.memoized_property
def remote_columns(self):
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self):
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self):
return self._gather_join_annotations("foreign")
@util.memoized_property
def deannotated_primaryjoin(self):
return _deep_deannotate(self.primaryjoin)
@util.memoized_property
def deannotated_secondaryjoin(self):
if self.secondaryjoin is not None:
return _deep_deannotate(self.secondaryjoin)
else:
return None
def _gather_join_annotations(self, annotation):
s = set(
self._gather_columns_with_annotation(
self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation)
)
return set([x._deannotate() for x in s])
def _gather_columns_with_annotation(self, clause, *annotation):
annotation = set(annotation)
return set([
col for col in visitors.iterate(clause, {})
if annotation.issubset(col._annotations)
])
def join_targets(self, source_selectable,
dest_selectable,
aliased,
single_crit=None):
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
{'no_replacement_traverse': True})
primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
self.secondaryjoin, self.secondary
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if aliased:
if secondary is not None:
secondary = secondary.alias(flat=True)
primary_aliasizer = ClauseAdapter(secondary)
secondary_aliasizer = \
ClauseAdapter(dest_selectable,
equivalents=self.child_equivalents).\
chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = \
ClauseAdapter(secondary).\
chain(ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents))
secondaryjoin = \
secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents))
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return primaryjoin, secondaryjoin, secondary, \
target_adapter, dest_selectable
def create_lazy_clause(self, reverse_direction=False):
binds = util.column_dict()
equated_columns = util.column_dict()
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(col):
if (
(not reverse_direction and 'local' in col._annotations) or
reverse_direction and (
(has_secondary and col in lookup) or
(not has_secondary and 'remote' in col._annotations)
)
):
if col not in binds:
binds[col] = sql.bindparam(
None, None, type_=col.type, unique=True)
return binds[col]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = dict((binds[col].key, col) for col in binds)
# this is probably not necessary
lazywhere = _deep_deannotate(lazywhere)
return lazywhere, bind_to_col, equated_columns
class _ColInAnnotations(object):
"""Seralizable equivalent to:
lambda c: "name" in c._annotations
"""
def __init__(self, name):
self.name = name
def __call__(self, c):
return self.name in c._annotations
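# --- Illustrative sketch (not part of the original module) -------------------
# The JoinCondition machinery above infers "foreign"/"remote" annotations from
# ForeignKey metadata; when inference is ambiguous (e.g. a self-referential
# table) they can be supplied explicitly from user code.  The Node model below
# is a made-up example and would normally live in application code.
if __name__ == '__main__':
    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import configure_mappers, foreign, relationship, remote

    Base = declarative_base()

    class Node(Base):
        __tablename__ = 'node'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('node.id'))

        # remote(id) marks the referenced primary key as the remote side and
        # foreign(parent_id) marks the foreign-key column, so _annotate_fks()
        # and _annotate_remote() have nothing to infer and
        # _determine_direction() resolves the relationship as MANYTOONE.
        parent = relationship(
            'Node', primaryjoin=remote(id) == foreign(parent_id))

    # configure_mappers() triggers JoinCondition setup for the relationship.
    configure_mappers()
    print(Node.parent.property.direction)  # -> symbol('MANYTOONE')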
|
bsd-3-clause
|
zhongdai/gensim
|
gensim/models/logentropy_model.py
|
88
|
4239
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
import math
from gensim import interfaces, matutils, utils
logger = logging.getLogger('gensim.models.logentropy_model')
class LogEntropyModel(interfaces.TransformationABC):
"""
Objects of this class realize the transformation of a word-document
co-occurrence matrix (integers) into a locally/globally weighted matrix
(positive floats).
This is done by a log entropy normalization, optionally normalizing the
resulting documents to unit length. The following formulas explain how
to compute the log entropy weight for term `i` in document `j`::
local_weight_{i,j} = log(frequency_{i,j} + 1)
P_{i,j} = frequency_{i,j} / sum_j frequency_{i,j}
global_weight_i = 1 + (sum_j P_{i,j} * log(P_{i,j})) / log(number_of_documents + 1)
final_weight_{i,j} = local_weight_{i,j} * global_weight_i
The main methods are:
1. constructor, which calculates the global weighting for all terms in
a corpus.
2. the [] method, which transforms a simple count representation into the
log entropy normalized space.
>>> log_ent = LogEntropyModel(corpus)
>>> print(log_ent[some_doc])
>>> log_ent.save('/tmp/foo.log_ent_model')
Model persistency is achieved via its load/save methods.
"""
def __init__(self, corpus, id2word=None, normalize=True):
"""
`normalize` dictates whether the resulting vectors will be
set to unit length.
"""
self.normalize = normalize
self.n_docs = 0
self.n_words = 0
self.entr = {}
if corpus is not None:
self.initialize(corpus)
def __str__(self):
return "LogEntropyModel(n_docs=%s, n_words=%s)" % (self.n_docs,
self.n_words)
def initialize(self, corpus):
"""
Initialize internal statistics based on a training corpus. Called
automatically from the constructor.
"""
logger.info("calculating counts")
glob_freq = {}
glob_num_words, doc_no = 0, -1
for doc_no, bow in enumerate(corpus):
if doc_no % 10000 == 0:
logger.info("PROGRESS: processing document #%i" % doc_no)
glob_num_words += len(bow)
for term_id, term_count in bow:
glob_freq[term_id] = glob_freq.get(term_id, 0) + term_count
# keep some stats about the training corpus
self.n_docs = doc_no + 1
self.n_words = glob_num_words
# and finally compute the global weights
logger.info("calculating global log entropy weights for %i "
"documents and %i features (%i matrix non-zeros)"
% (self.n_docs, len(glob_freq), self.n_words))
logger.debug('iterating over corpus')
for doc_no2, bow in enumerate(corpus):
for key, freq in bow:
p = (float(freq) / glob_freq[key]) * math.log(float(freq) /
glob_freq[key])
self.entr[key] = self.entr.get(key, 0.0) + p
if doc_no2 != doc_no:
raise ValueError("LogEntropyModel doesn't support generators as training data")
logger.debug('iterating over keys')
for key in self.entr:
self.entr[key] = 1 + self.entr[key] / math.log(self.n_docs + 1)
def __getitem__(self, bow):
"""
Return log entropy representation of the input vector and/or corpus.
"""
# if the input vector is in fact a corpus, return a transformed corpus
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge)
vector = [(term_id, math.log(tf + 1) * self.entr.get(term_id))
for term_id, tf in bow if term_id in self.entr]
if self.normalize:
vector = matutils.unitvec(vector)
return vector
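# --- Illustrative usage sketch (not part of the original module) -------------
# A tiny made-up bag-of-words corpus (lists of (term_id, count) pairs) run
# through the model defined above; the numbers are toy data for illustration.
if __name__ == '__main__':
    toy_corpus = [
        [(0, 2), (1, 1)],
        [(0, 1), (2, 3)],
        [(1, 2), (2, 1)],
    ]
    model = LogEntropyModel(toy_corpus, normalize=True)
    # transform a single document (unknown term ids are silently dropped)
    print(model[[(0, 1), (2, 2)]])
    # transform the whole corpus lazily, document by document
    for doc in model[toy_corpus]:
        print(doc)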
|
gpl-3.0
|
willingc/oh-mainline
|
vendor/packages/docutils/docutils/utils/math/tex2unichar.py
|
120
|
35109
|
# -*- coding: utf-8 -*-
# LaTeX math to Unicode symbols translation dictionaries.
# Generated with ``write_tex2unichar.py`` from the data in
# http://milde.users.sourceforge.net/LUCR/Math/
# Includes commands from: wasysym, stmaryrd, mathdots, mathabx, esint, bbold, amsxtra, amsmath, amssymb, standard LaTeX
mathaccent = {
'acute': u'\u0301', # x́ COMBINING ACUTE ACCENT
'bar': u'\u0304', # x̄ COMBINING MACRON
'breve': u'\u0306', # x̆ COMBINING BREVE
'check': u'\u030c', # x̌ COMBINING CARON
'ddddot': u'\u20dc', # x⃜ COMBINING FOUR DOTS ABOVE
'dddot': u'\u20db', # x⃛ COMBINING THREE DOTS ABOVE
'ddot': u'\u0308', # ẍ COMBINING DIAERESIS
'dot': u'\u0307', # ẋ COMBINING DOT ABOVE
'grave': u'\u0300', # x̀ COMBINING GRAVE ACCENT
'hat': u'\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
'mathring': u'\u030a', # x̊ COMBINING RING ABOVE
'not': u'\u0338', # x̸ COMBINING LONG SOLIDUS OVERLAY
'overleftarrow': u'\u20d6', # x⃖ COMBINING LEFT ARROW ABOVE
'overleftrightarrow': u'\u20e1', # x⃡ COMBINING LEFT RIGHT ARROW ABOVE
'overline': u'\u0305', # x̅ COMBINING OVERLINE
'overrightarrow': u'\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
'tilde': u'\u0303', # x̃ COMBINING TILDE
'underbar': u'\u0331', # x̱ COMBINING MACRON BELOW
'underleftarrow': u'\u20ee', # x⃮ COMBINING LEFT ARROW BELOW
'underline': u'\u0332', # x̲ COMBINING LOW LINE
'underrightarrow': u'\u20ef', # x⃯ COMBINING RIGHT ARROW BELOW
'vec': u'\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
'widehat': u'\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
'widetilde': u'\u0303', # x̃ COMBINING TILDE
}
mathalpha = {
'Bbbk': u'\U0001d55c', # 𝕜 MATHEMATICAL DOUBLE-STRUCK SMALL K
'Delta': u'\u0394', # Δ GREEK CAPITAL LETTER DELTA
'Gamma': u'\u0393', # Γ GREEK CAPITAL LETTER GAMMA
'Im': u'\u2111', # ℑ BLACK-LETTER CAPITAL I
'Lambda': u'\u039b', # Λ GREEK CAPITAL LETTER LAMDA
'Omega': u'\u03a9', # Ω GREEK CAPITAL LETTER OMEGA
'Phi': u'\u03a6', # Φ GREEK CAPITAL LETTER PHI
'Pi': u'\u03a0', # Π GREEK CAPITAL LETTER PI
'Psi': u'\u03a8', # Ψ GREEK CAPITAL LETTER PSI
'Re': u'\u211c', # ℜ BLACK-LETTER CAPITAL R
'Sigma': u'\u03a3', # Σ GREEK CAPITAL LETTER SIGMA
'Theta': u'\u0398', # Θ GREEK CAPITAL LETTER THETA
'Upsilon': u'\u03a5', # Υ GREEK CAPITAL LETTER UPSILON
'Xi': u'\u039e', # Ξ GREEK CAPITAL LETTER XI
'aleph': u'\u2135', # ℵ ALEF SYMBOL
'alpha': u'\u03b1', # α GREEK SMALL LETTER ALPHA
'beta': u'\u03b2', # β GREEK SMALL LETTER BETA
'beth': u'\u2136', # ℶ BET SYMBOL
'chi': u'\u03c7', # χ GREEK SMALL LETTER CHI
'daleth': u'\u2138', # ℸ DALET SYMBOL
'delta': u'\u03b4', # δ GREEK SMALL LETTER DELTA
'digamma': u'\u03dc', # Ϝ GREEK LETTER DIGAMMA
'ell': u'\u2113', # ℓ SCRIPT SMALL L
'epsilon': u'\u03f5', # ϵ GREEK LUNATE EPSILON SYMBOL
'eta': u'\u03b7', # η GREEK SMALL LETTER ETA
'eth': u'\xf0', # ð LATIN SMALL LETTER ETH
'gamma': u'\u03b3', # γ GREEK SMALL LETTER GAMMA
'gimel': u'\u2137', # ℷ GIMEL SYMBOL
'hbar': u'\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'hslash': u'\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'imath': u'\u0131', # ı LATIN SMALL LETTER DOTLESS I
'iota': u'\u03b9', # ι GREEK SMALL LETTER IOTA
'jmath': u'\u0237', # ȷ LATIN SMALL LETTER DOTLESS J
'kappa': u'\u03ba', # κ GREEK SMALL LETTER KAPPA
'lambda': u'\u03bb', # λ GREEK SMALL LETTER LAMDA
'mu': u'\u03bc', # μ GREEK SMALL LETTER MU
'nu': u'\u03bd', # ν GREEK SMALL LETTER NU
'omega': u'\u03c9', # ω GREEK SMALL LETTER OMEGA
'phi': u'\u03d5', # ϕ GREEK PHI SYMBOL
'pi': u'\u03c0', # π GREEK SMALL LETTER PI
'psi': u'\u03c8', # ψ GREEK SMALL LETTER PSI
'rho': u'\u03c1', # ρ GREEK SMALL LETTER RHO
'sigma': u'\u03c3', # σ GREEK SMALL LETTER SIGMA
'tau': u'\u03c4', # τ GREEK SMALL LETTER TAU
'theta': u'\u03b8', # θ GREEK SMALL LETTER THETA
'upsilon': u'\u03c5', # υ GREEK SMALL LETTER UPSILON
'varDelta': u'\U0001d6e5', # 𝛥 MATHEMATICAL ITALIC CAPITAL DELTA
'varGamma': u'\U0001d6e4', # 𝛤 MATHEMATICAL ITALIC CAPITAL GAMMA
'varLambda': u'\U0001d6ec', # 𝛬 MATHEMATICAL ITALIC CAPITAL LAMDA
'varOmega': u'\U0001d6fa', # 𝛺 MATHEMATICAL ITALIC CAPITAL OMEGA
'varPhi': u'\U0001d6f7', # 𝛷 MATHEMATICAL ITALIC CAPITAL PHI
'varPi': u'\U0001d6f1', # 𝛱 MATHEMATICAL ITALIC CAPITAL PI
'varPsi': u'\U0001d6f9', # 𝛹 MATHEMATICAL ITALIC CAPITAL PSI
'varSigma': u'\U0001d6f4', # 𝛴 MATHEMATICAL ITALIC CAPITAL SIGMA
'varTheta': u'\U0001d6e9', # 𝛩 MATHEMATICAL ITALIC CAPITAL THETA
'varUpsilon': u'\U0001d6f6', # 𝛶 MATHEMATICAL ITALIC CAPITAL UPSILON
'varXi': u'\U0001d6ef', # 𝛯 MATHEMATICAL ITALIC CAPITAL XI
'varepsilon': u'\u03b5', # ε GREEK SMALL LETTER EPSILON
'varkappa': u'\U0001d718', # 𝜘 MATHEMATICAL ITALIC KAPPA SYMBOL
'varphi': u'\u03c6', # φ GREEK SMALL LETTER PHI
'varpi': u'\u03d6', # ϖ GREEK PI SYMBOL
'varrho': u'\u03f1', # ϱ GREEK RHO SYMBOL
'varsigma': u'\u03c2', # ς GREEK SMALL LETTER FINAL SIGMA
'vartheta': u'\u03d1', # ϑ GREEK THETA SYMBOL
'wp': u'\u2118', # ℘ SCRIPT CAPITAL P
'xi': u'\u03be', # ξ GREEK SMALL LETTER XI
'zeta': u'\u03b6', # ζ GREEK SMALL LETTER ZETA
}
mathbin = {
'Cap': u'\u22d2', # ⋒ DOUBLE INTERSECTION
'Circle': u'\u25cb', # ○ WHITE CIRCLE
'Cup': u'\u22d3', # ⋓ DOUBLE UNION
'LHD': u'\u25c0', # ◀ BLACK LEFT-POINTING TRIANGLE
'RHD': u'\u25b6', # ▶ BLACK RIGHT-POINTING TRIANGLE
'amalg': u'\u2a3f', # ⨿ AMALGAMATION OR COPRODUCT
'ast': u'\u2217', # ∗ ASTERISK OPERATOR
'barwedge': u'\u22bc', # ⊼ NAND
'bigtriangledown': u'\u25bd', # ▽ WHITE DOWN-POINTING TRIANGLE
'bigtriangleup': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'bindnasrepma': u'\u214b', # ⅋ TURNED AMPERSAND
'blacklozenge': u'\u29eb', # ⧫ BLACK LOZENGE
'blacktriangledown': u'\u25be', # ▾ BLACK DOWN-POINTING SMALL TRIANGLE
'blacktriangleleft': u'\u25c2', # ◂ BLACK LEFT-POINTING SMALL TRIANGLE
'blacktriangleright': u'\u25b8', # ▸ BLACK RIGHT-POINTING SMALL TRIANGLE
'blacktriangleup': u'\u25b4', # ▴ BLACK UP-POINTING SMALL TRIANGLE
'boxast': u'\u29c6', # ⧆ SQUARED ASTERISK
'boxbar': u'\u25eb', # ◫ WHITE SQUARE WITH VERTICAL BISECTING LINE
'boxbox': u'\u29c8', # ⧈ SQUARED SQUARE
'boxbslash': u'\u29c5', # ⧅ SQUARED FALLING DIAGONAL SLASH
'boxcircle': u'\u29c7', # ⧇ SQUARED SMALL CIRCLE
'boxdot': u'\u22a1', # ⊡ SQUARED DOT OPERATOR
'boxminus': u'\u229f', # ⊟ SQUARED MINUS
'boxplus': u'\u229e', # ⊞ SQUARED PLUS
'boxslash': u'\u29c4', # ⧄ SQUARED RISING DIAGONAL SLASH
'boxtimes': u'\u22a0', # ⊠ SQUARED TIMES
'bullet': u'\u2219', # ∙ BULLET OPERATOR
'cap': u'\u2229', # ∩ INTERSECTION
'cdot': u'\u22c5', # ⋅ DOT OPERATOR
'circ': u'\u2218', # ∘ RING OPERATOR
'circledast': u'\u229b', # ⊛ CIRCLED ASTERISK OPERATOR
'circledcirc': u'\u229a', # ⊚ CIRCLED RING OPERATOR
'circleddash': u'\u229d', # ⊝ CIRCLED DASH
'cup': u'\u222a', # ∪ UNION
'curlyvee': u'\u22ce', # ⋎ CURLY LOGICAL OR
'curlywedge': u'\u22cf', # ⋏ CURLY LOGICAL AND
'dagger': u'\u2020', # † DAGGER
'ddagger': u'\u2021', # ‡ DOUBLE DAGGER
'diamond': u'\u22c4', # ⋄ DIAMOND OPERATOR
'div': u'\xf7', # ÷ DIVISION SIGN
'divideontimes': u'\u22c7', # ⋇ DIVISION TIMES
'dotplus': u'\u2214', # ∔ DOT PLUS
'doublebarwedge': u'\u2a5e', # ⩞ LOGICAL AND WITH DOUBLE OVERBAR
'intercal': u'\u22ba', # ⊺ INTERCALATE
'interleave': u'\u2af4', # ⫴ TRIPLE VERTICAL BAR BINARY RELATION
'land': u'\u2227', # ∧ LOGICAL AND
'leftthreetimes': u'\u22cb', # ⋋ LEFT SEMIDIRECT PRODUCT
'lhd': u'\u25c1', # ◁ WHITE LEFT-POINTING TRIANGLE
'lor': u'\u2228', # ∨ LOGICAL OR
'ltimes': u'\u22c9', # ⋉ LEFT NORMAL FACTOR SEMIDIRECT PRODUCT
'mp': u'\u2213', # ∓ MINUS-OR-PLUS SIGN
'odot': u'\u2299', # ⊙ CIRCLED DOT OPERATOR
'ominus': u'\u2296', # ⊖ CIRCLED MINUS
'oplus': u'\u2295', # ⊕ CIRCLED PLUS
'oslash': u'\u2298', # ⊘ CIRCLED DIVISION SLASH
'otimes': u'\u2297', # ⊗ CIRCLED TIMES
'pm': u'\xb1', # ± PLUS-MINUS SIGN
'rhd': u'\u25b7', # ▷ WHITE RIGHT-POINTING TRIANGLE
'rightthreetimes': u'\u22cc', # ⋌ RIGHT SEMIDIRECT PRODUCT
'rtimes': u'\u22ca', # ⋊ RIGHT NORMAL FACTOR SEMIDIRECT PRODUCT
'setminus': u'\u29f5', # ⧵ REVERSE SOLIDUS OPERATOR
'slash': u'\u2215', # ∕ DIVISION SLASH
'smallsetminus': u'\u2216', # ∖ SET MINUS
'smalltriangledown': u'\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'smalltriangleleft': u'\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'smalltriangleright': u'\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'smalltriangleup': u'\u25b5', # ▵ WHITE UP-POINTING SMALL TRIANGLE
'sqcap': u'\u2293', # ⊓ SQUARE CAP
'sqcup': u'\u2294', # ⊔ SQUARE CUP
'sslash': u'\u2afd', # ⫽ DOUBLE SOLIDUS OPERATOR
'star': u'\u22c6', # ⋆ STAR OPERATOR
'talloblong': u'\u2afe', # ⫾ WHITE VERTICAL BAR
'times': u'\xd7', # × MULTIPLICATION SIGN
'triangle': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'triangledown': u'\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'triangleleft': u'\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'triangleright': u'\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'uplus': u'\u228e', # ⊎ MULTISET UNION
'vartriangle': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'vee': u'\u2228', # ∨ LOGICAL OR
'veebar': u'\u22bb', # ⊻ XOR
'wedge': u'\u2227', # ∧ LOGICAL AND
'wr': u'\u2240', # ≀ WREATH PRODUCT
}
mathclose = {
'Rbag': u'\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
'lrcorner': u'\u231f', # ⌟ BOTTOM RIGHT CORNER
'rangle': u'\u27e9', # ⟩ MATHEMATICAL RIGHT ANGLE BRACKET
'rbag': u'\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
'rbrace': u'}', # } RIGHT CURLY BRACKET
'rbrack': u']', # ] RIGHT SQUARE BRACKET
'rceil': u'\u2309', # ⌉ RIGHT CEILING
'rfloor': u'\u230b', # ⌋ RIGHT FLOOR
'rgroup': u'\u27ef', # ⟯ MATHEMATICAL RIGHT FLATTENED PARENTHESIS
'rrbracket': u'\u27e7', # ⟧ MATHEMATICAL RIGHT WHITE SQUARE BRACKET
'rrparenthesis': u'\u2988', # ⦈ Z NOTATION RIGHT IMAGE BRACKET
'urcorner': u'\u231d', # ⌝ TOP RIGHT CORNER
'}': u'}', # } RIGHT CURLY BRACKET
}
mathfence = {
'Vert': u'\u2016', # ‖ DOUBLE VERTICAL LINE
'vert': u'|', # | VERTICAL LINE
'|': u'\u2016', # ‖ DOUBLE VERTICAL LINE
}
mathop = {
'Join': u'\u2a1d', # ⨝ JOIN
'bigcap': u'\u22c2', # ⋂ N-ARY INTERSECTION
'bigcup': u'\u22c3', # ⋃ N-ARY UNION
'biginterleave': u'\u2afc', # ⫼ LARGE TRIPLE VERTICAL BAR OPERATOR
'bigodot': u'\u2a00', # ⨀ N-ARY CIRCLED DOT OPERATOR
'bigoplus': u'\u2a01', # ⨁ N-ARY CIRCLED PLUS OPERATOR
'bigotimes': u'\u2a02', # ⨂ N-ARY CIRCLED TIMES OPERATOR
'bigsqcup': u'\u2a06', # ⨆ N-ARY SQUARE UNION OPERATOR
'biguplus': u'\u2a04', # ⨄ N-ARY UNION OPERATOR WITH PLUS
'bigvee': u'\u22c1', # ⋁ N-ARY LOGICAL OR
'bigwedge': u'\u22c0', # ⋀ N-ARY LOGICAL AND
'coprod': u'\u2210', # ∐ N-ARY COPRODUCT
'fatsemi': u'\u2a1f', # ⨟ Z NOTATION SCHEMA COMPOSITION
'fint': u'\u2a0f', # ⨏ INTEGRAL AVERAGE WITH SLASH
'iiiint': u'\u2a0c', # ⨌ QUADRUPLE INTEGRAL OPERATOR
'iiint': u'\u222d', # ∭ TRIPLE INTEGRAL
'iint': u'\u222c', # ∬ DOUBLE INTEGRAL
'int': u'\u222b', # ∫ INTEGRAL
'oiint': u'\u222f', # ∯ SURFACE INTEGRAL
'oint': u'\u222e', # ∮ CONTOUR INTEGRAL
'ointctrclockwise': u'\u2233', # ∳ ANTICLOCKWISE CONTOUR INTEGRAL
'prod': u'\u220f', # ∏ N-ARY PRODUCT
'sqint': u'\u2a16', # ⨖ QUATERNION INTEGRAL OPERATOR
'sum': u'\u2211', # ∑ N-ARY SUMMATION
'varointclockwise': u'\u2232', # ∲ CLOCKWISE CONTOUR INTEGRAL
}
mathopen = {
'Lbag': u'\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
'langle': u'\u27e8', # ⟨ MATHEMATICAL LEFT ANGLE BRACKET
'lbag': u'\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
'lbrace': u'{', # { LEFT CURLY BRACKET
'lbrack': u'[', # [ LEFT SQUARE BRACKET
'lceil': u'\u2308', # ⌈ LEFT CEILING
'lfloor': u'\u230a', # ⌊ LEFT FLOOR
'lgroup': u'\u27ee', # ⟮ MATHEMATICAL LEFT FLATTENED PARENTHESIS
'llbracket': u'\u27e6', # ⟦ MATHEMATICAL LEFT WHITE SQUARE BRACKET
'llcorner': u'\u231e', # ⌞ BOTTOM LEFT CORNER
'llparenthesis': u'\u2987', # ⦇ Z NOTATION LEFT IMAGE BRACKET
'ulcorner': u'\u231c', # ⌜ TOP LEFT CORNER
'{': u'{', # { LEFT CURLY BRACKET
}
mathord = {
'#': u'#', # # NUMBER SIGN
'$': u'$', # $ DOLLAR SIGN
'%': u'%', # % PERCENT SIGN
'&': u'&', # & AMPERSAND
'AC': u'\u223f', # ∿ SINE WAVE
'APLcomment': u'\u235d', # ⍝ APL FUNCTIONAL SYMBOL UP SHOE JOT
'APLdownarrowbox': u'\u2357', # ⍗ APL FUNCTIONAL SYMBOL QUAD DOWNWARDS ARROW
'APLinput': u'\u235e', # ⍞ APL FUNCTIONAL SYMBOL QUOTE QUAD
'APLinv': u'\u2339', # ⌹ APL FUNCTIONAL SYMBOL QUAD DIVIDE
'APLleftarrowbox': u'\u2347', # ⍇ APL FUNCTIONAL SYMBOL QUAD LEFTWARDS ARROW
'APLlog': u'\u235f', # ⍟ APL FUNCTIONAL SYMBOL CIRCLE STAR
'APLrightarrowbox': u'\u2348', # ⍈ APL FUNCTIONAL SYMBOL QUAD RIGHTWARDS ARROW
'APLuparrowbox': u'\u2350', # ⍐ APL FUNCTIONAL SYMBOL QUAD UPWARDS ARROW
'Aries': u'\u2648', # ♈ ARIES
'CIRCLE': u'\u25cf', # ● BLACK CIRCLE
'CheckedBox': u'\u2611', # ☑ BALLOT BOX WITH CHECK
'Diamond': u'\u25c7', # ◇ WHITE DIAMOND
'Finv': u'\u2132', # Ⅎ TURNED CAPITAL F
'Game': u'\u2141', # ⅁ TURNED SANS-SERIF CAPITAL G
'Gemini': u'\u264a', # ♊ GEMINI
'Jupiter': u'\u2643', # ♃ JUPITER
'LEFTCIRCLE': u'\u25d6', # ◖ LEFT HALF BLACK CIRCLE
'LEFTcircle': u'\u25d0', # ◐ CIRCLE WITH LEFT HALF BLACK
'Leo': u'\u264c', # ♌ LEO
'Libra': u'\u264e', # ♎ LIBRA
'Mars': u'\u2642', # ♂ MALE SIGN
'Mercury': u'\u263f', # ☿ MERCURY
'Neptune': u'\u2646', # ♆ NEPTUNE
'Pluto': u'\u2647', # ♇ PLUTO
'RIGHTCIRCLE': u'\u25d7', # ◗ RIGHT HALF BLACK CIRCLE
'RIGHTcircle': u'\u25d1', # ◑ CIRCLE WITH RIGHT HALF BLACK
'Saturn': u'\u2644', # ♄ SATURN
'Scorpio': u'\u264f', # ♏ SCORPIUS
'Square': u'\u2610', # ☐ BALLOT BOX
'Sun': u'\u2609', # ☉ SUN
'Taurus': u'\u2649', # ♉ TAURUS
'Uranus': u'\u2645', # ♅ URANUS
'Venus': u'\u2640', # ♀ FEMALE SIGN
'XBox': u'\u2612', # ☒ BALLOT BOX WITH X
'Yup': u'\u2144', # ⅄ TURNED SANS-SERIF CAPITAL Y
'_': u'_', # _ LOW LINE
'angle': u'\u2220', # ∠ ANGLE
'aquarius': u'\u2652', # ♒ AQUARIUS
'aries': u'\u2648', # ♈ ARIES
'ast': u'*', # * ASTERISK
'backepsilon': u'\u03f6', # ϶ GREEK REVERSED LUNATE EPSILON SYMBOL
'backprime': u'\u2035', # ‵ REVERSED PRIME
'backslash': u'\\', # \ REVERSE SOLIDUS
'because': u'\u2235', # ∵ BECAUSE
'bigstar': u'\u2605', # ★ BLACK STAR
'binampersand': u'&', # & AMPERSAND
'blacklozenge': u'\u2b27', # ⬧ BLACK MEDIUM LOZENGE
'blacksmiley': u'\u263b', # ☻ BLACK SMILING FACE
'blacksquare': u'\u25fc', # ◼ BLACK MEDIUM SQUARE
'bot': u'\u22a5', # ⊥ UP TACK
'boy': u'\u2642', # ♂ MALE SIGN
'cancer': u'\u264b', # ♋ CANCER
'capricornus': u'\u2651', # ♑ CAPRICORN
'cdots': u'\u22ef', # ⋯ MIDLINE HORIZONTAL ELLIPSIS
'cent': u'\xa2', # ¢ CENT SIGN
'centerdot': u'\u2b1d', # ⬝ BLACK VERY SMALL SQUARE
'checkmark': u'\u2713', # ✓ CHECK MARK
'circlearrowleft': u'\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'circlearrowright': u'\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'circledR': u'\xae', # ® REGISTERED SIGN
'circledcirc': u'\u25ce', # ◎ BULLSEYE
'clubsuit': u'\u2663', # ♣ BLACK CLUB SUIT
'complement': u'\u2201', # ∁ COMPLEMENT
'dasharrow': u'\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'dashleftarrow': u'\u21e0', # ⇠ LEFTWARDS DASHED ARROW
'dashrightarrow': u'\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'diameter': u'\u2300', # ⌀ DIAMETER SIGN
'diamondsuit': u'\u2662', # ♢ WHITE DIAMOND SUIT
'earth': u'\u2641', # ♁ EARTH
'exists': u'\u2203', # ∃ THERE EXISTS
'female': u'\u2640', # ♀ FEMALE SIGN
'flat': u'\u266d', # ♭ MUSIC FLAT SIGN
'forall': u'\u2200', # ∀ FOR ALL
'fourth': u'\u2057', # ⁗ QUADRUPLE PRIME
'frownie': u'\u2639', # ☹ WHITE FROWNING FACE
'gemini': u'\u264a', # ♊ GEMINI
'girl': u'\u2640', # ♀ FEMALE SIGN
'heartsuit': u'\u2661', # ♡ WHITE HEART SUIT
'infty': u'\u221e', # ∞ INFINITY
'invneg': u'\u2310', # ⌐ REVERSED NOT SIGN
'jupiter': u'\u2643', # ♃ JUPITER
'ldots': u'\u2026', # … HORIZONTAL ELLIPSIS
'leftmoon': u'\u263e', # ☾ LAST QUARTER MOON
'leftturn': u'\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'leo': u'\u264c', # ♌ LEO
'libra': u'\u264e', # ♎ LIBRA
'lnot': u'\xac', # ¬ NOT SIGN
'lozenge': u'\u25ca', # ◊ LOZENGE
'male': u'\u2642', # ♂ MALE SIGN
'maltese': u'\u2720', # ✠ MALTESE CROSS
'mathdollar': u'$', # $ DOLLAR SIGN
'measuredangle': u'\u2221', # ∡ MEASURED ANGLE
'mercury': u'\u263f', # ☿ MERCURY
'mho': u'\u2127', # ℧ INVERTED OHM SIGN
'nabla': u'\u2207', # ∇ NABLA
'natural': u'\u266e', # ♮ MUSIC NATURAL SIGN
'neg': u'\xac', # ¬ NOT SIGN
'neptune': u'\u2646', # ♆ NEPTUNE
'nexists': u'\u2204', # ∄ THERE DOES NOT EXIST
'notbackslash': u'\u2340', # ⍀ APL FUNCTIONAL SYMBOL BACKSLASH BAR
'partial': u'\u2202', # ∂ PARTIAL DIFFERENTIAL
'pisces': u'\u2653', # ♓ PISCES
'pluto': u'\u2647', # ♇ PLUTO
'pounds': u'\xa3', # £ POUND SIGN
'prime': u'\u2032', # ′ PRIME
'quarternote': u'\u2669', # ♩ QUARTER NOTE
'rightmoon': u'\u263d', # ☽ FIRST QUARTER MOON
'rightturn': u'\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'sagittarius': u'\u2650', # ♐ SAGITTARIUS
'saturn': u'\u2644', # ♄ SATURN
'scorpio': u'\u264f', # ♏ SCORPIUS
'second': u'\u2033', # ″ DOUBLE PRIME
'sharp': u'\u266f', # ♯ MUSIC SHARP SIGN
'sim': u'~', # ~ TILDE
'slash': u'/', # / SOLIDUS
'smiley': u'\u263a', # ☺ WHITE SMILING FACE
'spadesuit': u'\u2660', # ♠ BLACK SPADE SUIT
'spddot': u'\xa8', # ¨ DIAERESIS
'sphat': u'^', # ^ CIRCUMFLEX ACCENT
'sphericalangle': u'\u2222', # ∢ SPHERICAL ANGLE
'sptilde': u'~', # ~ TILDE
'square': u'\u25fb', # ◻ WHITE MEDIUM SQUARE
'sun': u'\u263c', # ☼ WHITE SUN WITH RAYS
'taurus': u'\u2649', # ♉ TAURUS
'therefore': u'\u2234', # ∴ THEREFORE
'third': u'\u2034', # ‴ TRIPLE PRIME
'top': u'\u22a4', # ⊤ DOWN TACK
'triangleleft': u'\u25c5', # ◅ WHITE LEFT-POINTING POINTER
'triangleright': u'\u25bb', # ▻ WHITE RIGHT-POINTING POINTER
'twonotes': u'\u266b', # ♫ BEAMED EIGHTH NOTES
'uranus': u'\u2645', # ♅ URANUS
'varEarth': u'\u2641', # ♁ EARTH
'varnothing': u'\u2205', # ∅ EMPTY SET
'virgo': u'\u264d', # ♍ VIRGO
'wasylozenge': u'\u2311', # ⌑ SQUARE LOZENGE
'wasytherefore': u'\u2234', # ∴ THEREFORE
'yen': u'\xa5', # ¥ YEN SIGN
}
mathover = {
'overbrace': u'\u23de', # ⏞ TOP CURLY BRACKET
'wideparen': u'\u23dc', # ⏜ TOP PARENTHESIS
}
mathradical = {
'sqrt': u'\u221a', # √ SQUARE ROOT
'sqrt[3]': u'\u221b', # ∛ CUBE ROOT
'sqrt[4]': u'\u221c', # ∜ FOURTH ROOT
}
mathrel = {
'Bumpeq': u'\u224e', # ≎ GEOMETRICALLY EQUIVALENT TO
'Doteq': u'\u2251', # ≑ GEOMETRICALLY EQUAL TO
'Downarrow': u'\u21d3', # ⇓ DOWNWARDS DOUBLE ARROW
'Leftarrow': u'\u21d0', # ⇐ LEFTWARDS DOUBLE ARROW
'Leftrightarrow': u'\u21d4', # ⇔ LEFT RIGHT DOUBLE ARROW
'Lleftarrow': u'\u21da', # ⇚ LEFTWARDS TRIPLE ARROW
'Longleftarrow': u'\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'Longleftrightarrow': u'\u27fa', # ⟺ LONG LEFT RIGHT DOUBLE ARROW
'Longmapsfrom': u'\u27fd', # ⟽ LONG LEFTWARDS DOUBLE ARROW FROM BAR
'Longmapsto': u'\u27fe', # ⟾ LONG RIGHTWARDS DOUBLE ARROW FROM BAR
'Longrightarrow': u'\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'Lsh': u'\u21b0', # ↰ UPWARDS ARROW WITH TIP LEFTWARDS
'Mapsfrom': u'\u2906', # ⤆ LEFTWARDS DOUBLE ARROW FROM BAR
'Mapsto': u'\u2907', # ⤇ RIGHTWARDS DOUBLE ARROW FROM BAR
'Rightarrow': u'\u21d2', # ⇒ RIGHTWARDS DOUBLE ARROW
'Rrightarrow': u'\u21db', # ⇛ RIGHTWARDS TRIPLE ARROW
'Rsh': u'\u21b1', # ↱ UPWARDS ARROW WITH TIP RIGHTWARDS
'Subset': u'\u22d0', # ⋐ DOUBLE SUBSET
'Supset': u'\u22d1', # ⋑ DOUBLE SUPERSET
'Uparrow': u'\u21d1', # ⇑ UPWARDS DOUBLE ARROW
'Updownarrow': u'\u21d5', # ⇕ UP DOWN DOUBLE ARROW
'VDash': u'\u22ab', # ⊫ DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'Vdash': u'\u22a9', # ⊩ FORCES
'Vvdash': u'\u22aa', # ⊪ TRIPLE VERTICAL BAR RIGHT TURNSTILE
'apprge': u'\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'apprle': u'\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'approx': u'\u2248', # ≈ ALMOST EQUAL TO
'approxeq': u'\u224a', # ≊ ALMOST EQUAL OR EQUAL TO
'asymp': u'\u224d', # ≍ EQUIVALENT TO
'backsim': u'\u223d', # ∽ REVERSED TILDE
'backsimeq': u'\u22cd', # ⋍ REVERSED TILDE EQUALS
'barin': u'\u22f6', # ⋶ ELEMENT OF WITH OVERBAR
'barleftharpoon': u'\u296b', # ⥫ LEFTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'barrightharpoon': u'\u296d', # ⥭ RIGHTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'between': u'\u226c', # ≬ BETWEEN
'bowtie': u'\u22c8', # ⋈ BOWTIE
'bumpeq': u'\u224f', # ≏ DIFFERENCE BETWEEN
'circeq': u'\u2257', # ≗ RING EQUAL TO
'coloneq': u'\u2254', # ≔ COLON EQUALS
'cong': u'\u2245', # ≅ APPROXIMATELY EQUAL TO
'corresponds': u'\u2259', # ≙ ESTIMATES
'curlyeqprec': u'\u22de', # ⋞ EQUAL TO OR PRECEDES
'curlyeqsucc': u'\u22df', # ⋟ EQUAL TO OR SUCCEEDS
'curvearrowleft': u'\u21b6', # ↶ ANTICLOCKWISE TOP SEMICIRCLE ARROW
'curvearrowright': u'\u21b7', # ↷ CLOCKWISE TOP SEMICIRCLE ARROW
'dashv': u'\u22a3', # ⊣ LEFT TACK
'ddots': u'\u22f1', # ⋱ DOWN RIGHT DIAGONAL ELLIPSIS
'dlsh': u'\u21b2', # ↲ DOWNWARDS ARROW WITH TIP LEFTWARDS
'doteq': u'\u2250', # ≐ APPROACHES THE LIMIT
'doteqdot': u'\u2251', # ≑ GEOMETRICALLY EQUAL TO
'downarrow': u'\u2193', # ↓ DOWNWARDS ARROW
'downdownarrows': u'\u21ca', # ⇊ DOWNWARDS PAIRED ARROWS
'downdownharpoons': u'\u2965', # ⥥ DOWNWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'downharpoonleft': u'\u21c3', # ⇃ DOWNWARDS HARPOON WITH BARB LEFTWARDS
'downharpoonright': u'\u21c2', # ⇂ DOWNWARDS HARPOON WITH BARB RIGHTWARDS
'downuparrows': u'\u21f5', # ⇵ DOWNWARDS ARROW LEFTWARDS OF UPWARDS ARROW
'downupharpoons': u'\u296f', # ⥯ DOWNWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'drsh': u'\u21b3', # ↳ DOWNWARDS ARROW WITH TIP RIGHTWARDS
'eqcirc': u'\u2256', # ≖ RING IN EQUAL TO
'eqcolon': u'\u2255', # ≕ EQUALS COLON
'eqsim': u'\u2242', # ≂ MINUS TILDE
'eqslantgtr': u'\u2a96', # ⪖ SLANTED EQUAL TO OR GREATER-THAN
'eqslantless': u'\u2a95', # ⪕ SLANTED EQUAL TO OR LESS-THAN
'equiv': u'\u2261', # ≡ IDENTICAL TO
'fallingdotseq': u'\u2252', # ≒ APPROXIMATELY EQUAL TO OR THE IMAGE OF
'frown': u'\u2322', # ⌢ FROWN
'ge': u'\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geq': u'\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geqq': u'\u2267', # ≧ GREATER-THAN OVER EQUAL TO
'geqslant': u'\u2a7e', # ⩾ GREATER-THAN OR SLANTED EQUAL TO
'gets': u'\u2190', # ← LEFTWARDS ARROW
'gg': u'\u226b', # ≫ MUCH GREATER-THAN
'ggcurly': u'\u2abc', # ⪼ DOUBLE SUCCEEDS
'ggg': u'\u22d9', # ⋙ VERY MUCH GREATER-THAN
'gnapprox': u'\u2a8a', # ⪊ GREATER-THAN AND NOT APPROXIMATE
'gneq': u'\u2a88', # ⪈ GREATER-THAN AND SINGLE-LINE NOT EQUAL TO
'gneqq': u'\u2269', # ≩ GREATER-THAN BUT NOT EQUAL TO
'gnsim': u'\u22e7', # ⋧ GREATER-THAN BUT NOT EQUIVALENT TO
'gtrapprox': u'\u2a86', # ⪆ GREATER-THAN OR APPROXIMATE
'gtrdot': u'\u22d7', # ⋗ GREATER-THAN WITH DOT
'gtreqless': u'\u22db', # ⋛ GREATER-THAN EQUAL TO OR LESS-THAN
'gtreqqless': u'\u2a8c', # ⪌ GREATER-THAN ABOVE DOUBLE-LINE EQUAL ABOVE LESS-THAN
'gtrless': u'\u2277', # ≷ GREATER-THAN OR LESS-THAN
'gtrsim': u'\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'hash': u'\u22d5', # ⋕ EQUAL AND PARALLEL TO
'hookleftarrow': u'\u21a9', # ↩ LEFTWARDS ARROW WITH HOOK
'hookrightarrow': u'\u21aa', # ↪ RIGHTWARDS ARROW WITH HOOK
'iddots': u'\u22f0', # ⋰ UP RIGHT DIAGONAL ELLIPSIS
'impliedby': u'\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'implies': u'\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'in': u'\u2208', # ∈ ELEMENT OF
'le': u'\u2264', # ≤ LESS-THAN OR EQUAL TO
'leftarrow': u'\u2190', # ← LEFTWARDS ARROW
'leftarrowtail': u'\u21a2', # ↢ LEFTWARDS ARROW WITH TAIL
'leftarrowtriangle': u'\u21fd', # ⇽ LEFTWARDS OPEN-HEADED ARROW
'leftbarharpoon': u'\u296a', # ⥪ LEFTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'leftharpoondown': u'\u21bd', # ↽ LEFTWARDS HARPOON WITH BARB DOWNWARDS
'leftharpoonup': u'\u21bc', # ↼ LEFTWARDS HARPOON WITH BARB UPWARDS
'leftleftarrows': u'\u21c7', # ⇇ LEFTWARDS PAIRED ARROWS
'leftleftharpoons': u'\u2962', # ⥢ LEFTWARDS HARPOON WITH BARB UP ABOVE LEFTWARDS HARPOON WITH BARB DOWN
'leftrightarrow': u'\u2194', # ↔ LEFT RIGHT ARROW
'leftrightarrows': u'\u21c6', # ⇆ LEFTWARDS ARROW OVER RIGHTWARDS ARROW
'leftrightarrowtriangle': u'\u21ff', # ⇿ LEFT RIGHT OPEN-HEADED ARROW
'leftrightharpoon': u'\u294a', # ⥊ LEFT BARB UP RIGHT BARB DOWN HARPOON
'leftrightharpoons': u'\u21cb', # ⇋ LEFTWARDS HARPOON OVER RIGHTWARDS HARPOON
'leftrightsquigarrow': u'\u21ad', # ↭ LEFT RIGHT WAVE ARROW
'leftslice': u'\u2aa6', # ⪦ LESS-THAN CLOSED BY CURVE
'leftsquigarrow': u'\u21dc', # ⇜ LEFTWARDS SQUIGGLE ARROW
'leq': u'\u2264', # ≤ LESS-THAN OR EQUAL TO
'leqq': u'\u2266', # ≦ LESS-THAN OVER EQUAL TO
'leqslant': u'\u2a7d', # ⩽ LESS-THAN OR SLANTED EQUAL TO
'lessapprox': u'\u2a85', # ⪅ LESS-THAN OR APPROXIMATE
'lessdot': u'\u22d6', # ⋖ LESS-THAN WITH DOT
'lesseqgtr': u'\u22da', # ⋚ LESS-THAN EQUAL TO OR GREATER-THAN
'lesseqqgtr': u'\u2a8b', # ⪋ LESS-THAN ABOVE DOUBLE-LINE EQUAL ABOVE GREATER-THAN
'lessgtr': u'\u2276', # ≶ LESS-THAN OR GREATER-THAN
'lesssim': u'\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'lightning': u'\u21af', # ↯ DOWNWARDS ZIGZAG ARROW
'll': u'\u226a', # ≪ MUCH LESS-THAN
'llcurly': u'\u2abb', # ⪻ DOUBLE PRECEDES
'lll': u'\u22d8', # ⋘ VERY MUCH LESS-THAN
'lnapprox': u'\u2a89', # ⪉ LESS-THAN AND NOT APPROXIMATE
'lneq': u'\u2a87', # ⪇ LESS-THAN AND SINGLE-LINE NOT EQUAL TO
'lneqq': u'\u2268', # ≨ LESS-THAN BUT NOT EQUAL TO
'lnsim': u'\u22e6', # ⋦ LESS-THAN BUT NOT EQUIVALENT TO
'longleftarrow': u'\u27f5', # ⟵ LONG LEFTWARDS ARROW
'longleftrightarrow': u'\u27f7', # ⟷ LONG LEFT RIGHT ARROW
'longmapsfrom': u'\u27fb', # ⟻ LONG LEFTWARDS ARROW FROM BAR
'longmapsto': u'\u27fc', # ⟼ LONG RIGHTWARDS ARROW FROM BAR
'longrightarrow': u'\u27f6', # ⟶ LONG RIGHTWARDS ARROW
'looparrowleft': u'\u21ab', # ↫ LEFTWARDS ARROW WITH LOOP
'looparrowright': u'\u21ac', # ↬ RIGHTWARDS ARROW WITH LOOP
'mapsfrom': u'\u21a4', # ↤ LEFTWARDS ARROW FROM BAR
'mapsto': u'\u21a6', # ↦ RIGHTWARDS ARROW FROM BAR
'mid': u'\u2223', # ∣ DIVIDES
'models': u'\u22a7', # ⊧ MODELS
'multimap': u'\u22b8', # ⊸ MULTIMAP
'nLeftarrow': u'\u21cd', # ⇍ LEFTWARDS DOUBLE ARROW WITH STROKE
'nLeftrightarrow': u'\u21ce', # ⇎ LEFT RIGHT DOUBLE ARROW WITH STROKE
'nRightarrow': u'\u21cf', # ⇏ RIGHTWARDS DOUBLE ARROW WITH STROKE
'nVDash': u'\u22af', # ⊯ NEGATED DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'nVdash': u'\u22ae', # ⊮ DOES NOT FORCE
'ncong': u'\u2247', # ≇ NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO
'ne': u'\u2260', # ≠ NOT EQUAL TO
'nearrow': u'\u2197', # ↗ NORTH EAST ARROW
'neq': u'\u2260', # ≠ NOT EQUAL TO
'ngeq': u'\u2271', # ≱ NEITHER GREATER-THAN NOR EQUAL TO
'ngtr': u'\u226f', # ≯ NOT GREATER-THAN
'ni': u'\u220b', # ∋ CONTAINS AS MEMBER
'nleftarrow': u'\u219a', # ↚ LEFTWARDS ARROW WITH STROKE
'nleftrightarrow': u'\u21ae', # ↮ LEFT RIGHT ARROW WITH STROKE
'nleq': u'\u2270', # ≰ NEITHER LESS-THAN NOR EQUAL TO
'nless': u'\u226e', # ≮ NOT LESS-THAN
'nmid': u'\u2224', # ∤ DOES NOT DIVIDE
'notasymp': u'\u226d', # ≭ NOT EQUIVALENT TO
'notin': u'\u2209', # ∉ NOT AN ELEMENT OF
'notowner': u'\u220c', # ∌ DOES NOT CONTAIN AS MEMBER
'notslash': u'\u233f', # ⌿ APL FUNCTIONAL SYMBOL SLASH BAR
'nparallel': u'\u2226', # ∦ NOT PARALLEL TO
'nprec': u'\u2280', # ⊀ DOES NOT PRECEDE
'npreceq': u'\u22e0', # ⋠ DOES NOT PRECEDE OR EQUAL
'nrightarrow': u'\u219b', # ↛ RIGHTWARDS ARROW WITH STROKE
'nsim': u'\u2241', # ≁ NOT TILDE
'nsubseteq': u'\u2288', # ⊈ NEITHER A SUBSET OF NOR EQUAL TO
'nsucc': u'\u2281', # ⊁ DOES NOT SUCCEED
'nsucceq': u'\u22e1', # ⋡ DOES NOT SUCCEED OR EQUAL
'nsupseteq': u'\u2289', # ⊉ NEITHER A SUPERSET OF NOR EQUAL TO
'ntriangleleft': u'\u22ea', # ⋪ NOT NORMAL SUBGROUP OF
'ntrianglelefteq': u'\u22ec', # ⋬ NOT NORMAL SUBGROUP OF OR EQUAL TO
'ntriangleright': u'\u22eb', # ⋫ DOES NOT CONTAIN AS NORMAL SUBGROUP
'ntrianglerighteq': u'\u22ed', # ⋭ DOES NOT CONTAIN AS NORMAL SUBGROUP OR EQUAL
'nvDash': u'\u22ad', # ⊭ NOT TRUE
'nvdash': u'\u22ac', # ⊬ DOES NOT PROVE
'nwarrow': u'\u2196', # ↖ NORTH WEST ARROW
'owns': u'\u220b', # ∋ CONTAINS AS MEMBER
'parallel': u'\u2225', # ∥ PARALLEL TO
'perp': u'\u27c2', # ⟂ PERPENDICULAR
'pitchfork': u'\u22d4', # ⋔ PITCHFORK
'prec': u'\u227a', # ≺ PRECEDES
'precapprox': u'\u2ab7', # ⪷ PRECEDES ABOVE ALMOST EQUAL TO
'preccurlyeq': u'\u227c', # ≼ PRECEDES OR EQUAL TO
'preceq': u'\u2aaf', # ⪯ PRECEDES ABOVE SINGLE-LINE EQUALS SIGN
'precnapprox': u'\u2ab9', # ⪹ PRECEDES ABOVE NOT ALMOST EQUAL TO
'precnsim': u'\u22e8', # ⋨ PRECEDES BUT NOT EQUIVALENT TO
'precsim': u'\u227e', # ≾ PRECEDES OR EQUIVALENT TO
'propto': u'\u221d', # ∝ PROPORTIONAL TO
'restriction': u'\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'rightarrow': u'\u2192', # → RIGHTWARDS ARROW
'rightarrowtail': u'\u21a3', # ↣ RIGHTWARDS ARROW WITH TAIL
'rightarrowtriangle': u'\u21fe', # ⇾ RIGHTWARDS OPEN-HEADED ARROW
'rightbarharpoon': u'\u296c', # ⥬ RIGHTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'rightharpoondown': u'\u21c1', # ⇁ RIGHTWARDS HARPOON WITH BARB DOWNWARDS
'rightharpoonup': u'\u21c0', # ⇀ RIGHTWARDS HARPOON WITH BARB UPWARDS
'rightleftarrows': u'\u21c4', # ⇄ RIGHTWARDS ARROW OVER LEFTWARDS ARROW
'rightleftharpoon': u'\u294b', # ⥋ LEFT BARB DOWN RIGHT BARB UP HARPOON
'rightleftharpoons': u'\u21cc', # ⇌ RIGHTWARDS HARPOON OVER LEFTWARDS HARPOON
'rightrightarrows': u'\u21c9', # ⇉ RIGHTWARDS PAIRED ARROWS
'rightrightharpoons': u'\u2964', # ⥤ RIGHTWARDS HARPOON WITH BARB UP ABOVE RIGHTWARDS HARPOON WITH BARB DOWN
'rightslice': u'\u2aa7', # ⪧ GREATER-THAN CLOSED BY CURVE
'rightsquigarrow': u'\u21dd', # ⇝ RIGHTWARDS SQUIGGLE ARROW
'risingdotseq': u'\u2253', # ≓ IMAGE OF OR APPROXIMATELY EQUAL TO
'searrow': u'\u2198', # ↘ SOUTH EAST ARROW
'sim': u'\u223c', # ∼ TILDE OPERATOR
'simeq': u'\u2243', # ≃ ASYMPTOTICALLY EQUAL TO
'smallfrown': u'\u2322', # ⌢ FROWN
'smallsmile': u'\u2323', # ⌣ SMILE
'smile': u'\u2323', # ⌣ SMILE
'sqsubset': u'\u228f', # ⊏ SQUARE IMAGE OF
'sqsubseteq': u'\u2291', # ⊑ SQUARE IMAGE OF OR EQUAL TO
'sqsupset': u'\u2290', # ⊐ SQUARE ORIGINAL OF
'sqsupseteq': u'\u2292', # ⊒ SQUARE ORIGINAL OF OR EQUAL TO
'subset': u'\u2282', # ⊂ SUBSET OF
'subseteq': u'\u2286', # ⊆ SUBSET OF OR EQUAL TO
'subseteqq': u'\u2ac5', # ⫅ SUBSET OF ABOVE EQUALS SIGN
'subsetneq': u'\u228a', # ⊊ SUBSET OF WITH NOT EQUAL TO
'subsetneqq': u'\u2acb', # ⫋ SUBSET OF ABOVE NOT EQUAL TO
'succ': u'\u227b', # ≻ SUCCEEDS
'succapprox': u'\u2ab8', # ⪸ SUCCEEDS ABOVE ALMOST EQUAL TO
'succcurlyeq': u'\u227d', # ≽ SUCCEEDS OR EQUAL TO
'succeq': u'\u2ab0', # ⪰ SUCCEEDS ABOVE SINGLE-LINE EQUALS SIGN
'succnapprox': u'\u2aba', # ⪺ SUCCEEDS ABOVE NOT ALMOST EQUAL TO
'succnsim': u'\u22e9', # ⋩ SUCCEEDS BUT NOT EQUIVALENT TO
'succsim': u'\u227f', # ≿ SUCCEEDS OR EQUIVALENT TO
'supset': u'\u2283', # ⊃ SUPERSET OF
'supseteq': u'\u2287', # ⊇ SUPERSET OF OR EQUAL TO
'supseteqq': u'\u2ac6', # ⫆ SUPERSET OF ABOVE EQUALS SIGN
'supsetneq': u'\u228b', # ⊋ SUPERSET OF WITH NOT EQUAL TO
'supsetneqq': u'\u2acc', # ⫌ SUPERSET OF ABOVE NOT EQUAL TO
'swarrow': u'\u2199', # ↙ SOUTH WEST ARROW
'to': u'\u2192', # → RIGHTWARDS ARROW
'trianglelefteq': u'\u22b4', # ⊴ NORMAL SUBGROUP OF OR EQUAL TO
'triangleq': u'\u225c', # ≜ DELTA EQUAL TO
'trianglerighteq': u'\u22b5', # ⊵ CONTAINS AS NORMAL SUBGROUP OR EQUAL TO
'twoheadleftarrow': u'\u219e', # ↞ LEFTWARDS TWO HEADED ARROW
'twoheadrightarrow': u'\u21a0', # ↠ RIGHTWARDS TWO HEADED ARROW
'uparrow': u'\u2191', # ↑ UPWARDS ARROW
'updownarrow': u'\u2195', # ↕ UP DOWN ARROW
'updownarrows': u'\u21c5', # ⇅ UPWARDS ARROW LEFTWARDS OF DOWNWARDS ARROW
'updownharpoons': u'\u296e', # ⥮ UPWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'upharpoonleft': u'\u21bf', # ↿ UPWARDS HARPOON WITH BARB LEFTWARDS
'upharpoonright': u'\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'upuparrows': u'\u21c8', # ⇈ UPWARDS PAIRED ARROWS
'upupharpoons': u'\u2963', # ⥣ UPWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'vDash': u'\u22a8', # ⊨ TRUE
'varpropto': u'\u221d', # ∝ PROPORTIONAL TO
'vartriangleleft': u'\u22b2', # ⊲ NORMAL SUBGROUP OF
'vartriangleright': u'\u22b3', # ⊳ CONTAINS AS NORMAL SUBGROUP
'vdash': u'\u22a2', # ⊢ RIGHT TACK
'vdots': u'\u22ee', # ⋮ VERTICAL ELLIPSIS
}
mathunder = {
'underbrace': u'\u23df', # ⏟ BOTTOM CURLY BRACKET
}
space = {
':': u'\u205f', # MEDIUM MATHEMATICAL SPACE
'medspace': u'\u205f', # MEDIUM MATHEMATICAL SPACE
'quad': u'\u2001', # EM QUAD
}
|
agpl-3.0
|
EmanueleCannizzaro/scons
|
test/scons-time/time/which.py
|
1
|
2703
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/scons-time/time/which.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the time --which option.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.fake_logfile('foo-000-0.log', 0)
test.fake_logfile('foo-000-1.log', 0)
test.fake_logfile('foo-000-2.log', 0)
test.fake_logfile('foo-001-0.log', 1)
test.fake_logfile('foo-001-1.log', 1)
test.fake_logfile('foo-001-2.log', 1)
expect = """\
set key bottom left
plot '-' title "Startup" with lines lt 1, \\
'-' title "Full build" with lines lt 2, \\
'-' title "Up-to-date build" with lines lt 3
# Startup
0 %(time)s
1 %(time)s
e
# Full build
0 %(time)s
1 %(time)s
e
# Up-to-date build
0 %(time)s
1 %(time)s
e
"""
total = expect % {'time' : 11.123456}
SConscripts = expect % {'time' : 22.234567}
SCons = expect % {'time' : 33.345678}
commands = expect % {'time' : 44.456789}
test.run(arguments = 'time --fmt gnuplot --which total', stdout=total)
test.run(arguments = 'time --fmt gnuplot --which=SConscripts', stdout=SConscripts)
test.run(arguments = 'time --fmt gnuplot --which=SCons', stdout=SCons)
test.run(arguments = 'time --fmt gnuplot --which commands', stdout=commands)
expect = """\
scons-time: time: Unrecognized timer "unknown".
Type "scons-time help time" for help.
"""
test.run(arguments = 'time --fmt gnuplot --which unknown',
status = 1,
stderr = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
abrt/faf
|
src/pyfaf/actions/cleanup_unassigned.py
|
1
|
3252
|
# Copyright (C) 2016 ABRT Team
# Copyright (C) 2016 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from pyfaf.actions import Action
from pyfaf.storage.opsys import (BuildOpSysReleaseArch, Build, Package,
PackageDependency, BuildArch, BuildComponent)
from pyfaf.storage.report import ReportPackage
from pyfaf.storage.problem import ProblemOpSysRelease
from pyfaf.storage.llvm import LlvmBuild, LlvmBcFile, LlvmResultFile
class CleanupUnassigned(Action):
name = "cleanup-unassigned"
def run(self, cmdline, db) -> None:
# find all builds that are not assigned to any opsysrelease
all_builds = (db.session.query(Build)
.filter(~ db.session.query().exists().where(BuildOpSysReleaseArch.build_id == Build.id))
.yield_per(1000))
count = 0
# delete all builds and the packages that belong to them
for build in all_builds:
count += 1
q = db.session.query(Package).filter(Package.build_id == build.id)
for pkg in q.all():
self.log_info("Processing package {0}".format(pkg.nevr()))
self.delete_package(pkg, not(cmdline.force))
if cmdline.force:
db.session.query(PackageDependency).filter(PackageDependency.package_id == pkg.id).delete()
db.session.query(ReportPackage).filter(ReportPackage.installed_package_id == pkg.id).delete()
if cmdline.force:
q.delete()
db.session.query(BuildArch).filter(build.id == BuildArch.build_id).delete()
db.session.query(BuildComponent).filter(build.id == BuildComponent.build_id).delete()
db.session.query(ProblemOpSysRelease).filter(build.id
== ProblemOpSysRelease.probable_fix_build_id).delete()
q_llvm = db.session.query(LlvmBuild).filter(LlvmBuild.build_id == build.id)
for llvm in q_llvm.all():
db.session.query(LlvmBcFile).filter(LlvmBcFile.llvmbuild_id == llvm.id).delete()
db.session.query(LlvmResultFile).filter(LlvmResultFile.llvmbuild_id == llvm.id).delete()
db.session.query(Build).filter(Build.id == build.id).delete()
if count > 1000:
db.session.flush()
count = 0
def tweak_cmdline_parser(self, parser) -> None:
parser.add_argument("-f", "--force", action="store_true",
help="delete all unassigned packages."
" Without -f acts like --dry-run.")
|
gpl-3.0
|
rfgil/MusicBooker
|
Music.py
|
1
|
8811
|
# coding=utf-8
import math
import i18n
_ = i18n.language.gettext
CHORUS_INDENTIFIER = '{CHORUS}'
ORDER_INDENTIFIER = '{ORDER}'
COMMENT_INDENTIFIER = '//'
class Music():
chorus = ''
chorus_chords = ''
verse = []
verse_chords = []
has_chorus = False
verses_before_chorus = 0
start_with_chorus = False
name = ''
subtitle = ''
def n_verse(self):
"""
Returns the number of verses this music has.
@return Integer
"""
return len(self.verse)
def insert_in_string(self, str, substr, pos):
"""
Inserts a substring in a string at a given position.
@param str String to insert substring in
@param substr Substring to insert
@param pos Position in the string where the substring is going to be placed
@return String
"""
if pos > len(str):
for i in range(len(str), pos):
str += ' '
return str + substr
else:
return str[:pos] + substr + str[pos:]
def is_chords(self, line):
"""
Verifies if a given string is chords or lyrics.
@param line String to analyse
@return Boolean
"""
if '#' in line:
return True
elif ' ' in line:
return True
else:
return False
def insert_chords(self, chords_line, chords_array):
"""
Creates a string with the chords and lyrics mixed, following the LaTeX songs package syntax.
@param chords_line String with lyrics
@param chords_array Array with chords at the positions where they are supposed to appear in the lyrics. Other positions should be empty.
@return String
"""
chords_line = chords_line.rstrip()
for i in range(len(chords_array) - 1, -1, -1):
if chords_array[i] != '':
spaces = 0
for j in range(i):
spaces += len(chords_array[j])
chords_line = self.insert_in_string(chords_line, '\\[' + chords_array[i] + ']', i + spaces)
return chords_line
def __init__(self, name, subtitle):
my_file = open('source/' + name, encoding="utf-8")
self.name = name[0].upper() + name[1:] # Capitalizes the first letter of the name
self.subtitle = subtitle
self.chorus = ''
self.chorus_chords = ''
self.verse = []
self.verse_chords = []
self.has_chorus = False
self.verses_before_chorus = 0
self.start_with_chorus = False
is_chorus = False
current_verse = 0
chords_array = []
self.order = []
custom_order = ''
for line in my_file:
line_chords = line
line = line.rstrip()
if COMMENT_INDENTIFIER in line:
continue
elif self.is_chords(line_chords): # Sets an array containing all chords in their correct positions
chords_array = line.split(' ')
continue
elif chords_array: # If chords_array is not empty
line_chords = self.insert_chords(line_chords, chords_array)
chords_array = []
if ORDER_INDENTIFIER in line:
custom_order = line.replace(ORDER_INDENTIFIER, '')
custom_order = custom_order.replace(':', '')
elif CHORUS_INDENTIFIER in line:
is_chorus = True
self.has_chorus = True
if current_verse == 0:
self.start_with_chorus = True
elif not line.strip(): # line is empty
if is_chorus:
is_chorus = False
else:
if len(self.verse) < current_verse + 1 or not self.verse: # Prevents errors on multiple consecutive empty lines
continue
else:
current_verse += 1
if not self.has_chorus: # Hasn't reached a chorus yet
self.verses_before_chorus += 1
elif is_chorus:
self.chorus += line + '\n'
self.chorus_chords += line_chords + '\n'
else:
if len(self.verse) == current_verse:
self.verse.append(line + '\n')
self.verse_chords.append(line_chords + '\n')
else:
self.verse[current_verse] += line + '\n'
self.verse_chords[current_verse] += line_chords + '\n'
my_file.close()
if custom_order:
self.order = custom_order.split(',')
for i in range(len(self.order)):
self.order[i] = self.order[i].strip()
if str(self.order[i]) != CHORUS_INDENTIFIER:
self.order[i] = int(self.order[i]) - 1
else:
for i in range(self.verses_before_chorus):
self.order += [i]
if self.has_chorus:
self.order += [CHORUS_INDENTIFIER]
repeat = self.verses_before_chorus
if repeat == 0: repeat = 1
verse_count = self.verses_before_chorus
while verse_count < len(self.verse):
for i in range(repeat):
if i + verse_count < len(self.verse):
self.order += [i + verse_count]
self.order += [CHORUS_INDENTIFIER]
verse_count += repeat
else:
for i in range(self.verses_before_chorus, len(self.verse)):
self.order += [i]
def write_tex(self, chords_file, lyrics_file):
"""
Writes the song LaTeX code to a file.
@param chords_file Chords file
@param lyrics_file Lyrics file
"""
musica_inic = "\\beginsong{%s}[by={%s}]\n\n"
musica_fim = "\\endsong\n\n"
chords_file.write(musica_inic % (self.name, self.subtitle))
lyrics_file.write(musica_inic % (self.name, self.subtitle))
for i in range(self.verses_before_chorus):
chords_file.write('\\beginverse\n' + self.verse_chords[i] + '\\endverse\n\n')
lyrics_file.write('\\beginverse\n' + self.verse[i] + '\\endverse\n\n')
if self.has_chorus:
chords_file.write('\\beginchorus\n' + self.chorus_chords + '\\endchorus\n\n')
lyrics_file.write('\\beginchorus\n' + self.chorus + '\\endchorus\n\n')
for i in range(self.verses_before_chorus, self.n_verse()):
chords_file.write('\\beginverse\n' + self.verse_chords[i] + '\\endverse\n\n')
lyrics_file.write('\\beginverse\n' + self.verse[i] + '\\endverse\n\n')
chords_file.write(musica_fim)
lyrics_file.write(musica_fim)
def writer(self, text):
"""
Creates a verse or chorus in LaTeX code bearing in mind that each slide
can display only up to 7 lines with the current settings
@param text Verse or chorus to write in LaTeX
@return output Verse or chorus LaTeX code
"""
text = text.rstrip()
lines = text.split('\n')
number_of_slides = math.ceil(len(lines) / 7) # Each slide can display only up to 7 lines
number_of_lines = []
# Sets how many lines are going into each slide
for slide in range(number_of_slides - 1):
number_of_lines.append(int(len(lines) / number_of_slides))
number_of_lines.append(len(lines) - len(number_of_lines)*int(len(lines) / number_of_slides))
output = ""
current_line = 0
for slide in range(number_of_slides):
output += '\\begin{frame}\n'
for i in range(number_of_lines[slide]):
output += lines[current_line] + '\n'
current_line += 1
output += '\\end{frame}\n\n'
return output
def write_presentation(self, presentation_file):
"""
Writes presentation LaTeX code to a file
@param presentation_file
"""
presentation_file.write('\n%---------- ' + self.name + ' ----------\n\n')
print(_("Printing")+ ": " + self.name)
for item in self.order:
# presentation_file.write('\\begin{frame}\n')
if item == CHORUS_INDENTIFIER:
presentation_file.write(self.writer(self.chorus))
else:
presentation_file.write(self.writer(self.verse[item]))
# presentation_file.write('\\end{frame}\n\n')
presentation_file.write('\\begin{frame}\n\\end{frame}\n\n')
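# Illustrative usage sketch (not part of the original module). It assumes a
# lyrics file exists at source/example; the output file names are made up.
if __name__ == '__main__':
    song = Music('example', 'Example subtitle')
    with open('chords.tex', 'w', encoding='utf-8') as chords_file, \
            open('lyrics.tex', 'w', encoding='utf-8') as lyrics_file:
        song.write_tex(chords_file, lyrics_file)
    with open('presentation.tex', 'w', encoding='utf-8') as presentation_file:
        song.write_presentation(presentation_file)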
|
mit
|
Azulinho/ansible
|
test/units/executor/module_common/test_module_common.py
|
83
|
4593
|
# (c) 2017, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import ansible.errors
from ansible.executor import module_common as amc
from ansible.module_utils.six import PY2
class TestStripComments(object):
def test_no_changes(self):
no_comments = u"""def some_code():
return False"""
assert amc._strip_comments(no_comments) == no_comments
def test_all_comments(self):
all_comments = u"""# This is a test
# Being as it is
# To be
"""
assert amc._strip_comments(all_comments) == u""
def test_all_whitespace(self):
# Note: Do not remove the spaces on the blank lines below. They're
# test data to show that the lines get removed despite having spaces
# on them
all_whitespace = u"""
\t\t\r\n
""" # nopep8
assert amc._strip_comments(all_whitespace) == u""
def test_somewhat_normal(self):
mixed = u"""#!/usr/bin/python
# here we go
def test(arg):
# this is a thing
thing = '# test'
return thing
# End
"""
mixed_results = u"""def test(arg):
thing = '# test'
return thing"""
assert amc._strip_comments(mixed) == mixed_results
class TestSlurp(object):
def test_slurp_nonexistent(self, mocker):
mocker.patch('os.path.exists', side_effect=lambda x: False)
with pytest.raises(ansible.errors.AnsibleError):
amc._slurp('no_file')
def test_slurp_file(self, mocker):
mocker.patch('os.path.exists', side_effect=lambda x: True)
m = mocker.mock_open(read_data='This is a test')
if PY2:
mocker.patch('__builtin__.open', m)
else:
mocker.patch('builtins.open', m)
assert amc._slurp('some_file') == 'This is a test'
def test_slurp_file_with_newlines(self, mocker):
mocker.patch('os.path.exists', side_effect=lambda x: True)
m = mocker.mock_open(read_data='#!/usr/bin/python\ndef test(args):\nprint("hi")\n')
if PY2:
mocker.patch('__builtin__.open', m)
else:
mocker.patch('builtins.open', m)
assert amc._slurp('some_file') == '#!/usr/bin/python\ndef test(args):\nprint("hi")\n'
@pytest.fixture
def templar():
class FakeTemplar(object):
def template(self, template_string, *args, **kwargs):
return template_string
return FakeTemplar()
class TestGetShebang(object):
"""Note: We may want to change the API of this function in the future. It isn't a great API"""
def test_no_interpreter_set(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {}, templar) == (None, u'/usr/bin/python')
def test_non_python_interpreter(self, templar):
assert amc._get_shebang(u'/usr/bin/ruby', {}, templar) == (None, u'/usr/bin/ruby')
def test_interpreter_set_in_task_vars(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/pypy'}, templar) == \
(u'#!/usr/bin/pypy', u'/usr/bin/pypy')
def test_non_python_interpreter_in_task_vars(self, templar):
assert amc._get_shebang(u'/usr/bin/ruby', {u'ansible_ruby_interpreter': u'/usr/local/bin/ruby'}, templar) == \
(u'#!/usr/local/bin/ruby', u'/usr/local/bin/ruby')
def test_with_args(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/python3'}, templar, args=('-tt', '-OO')) == \
(u'#!/usr/bin/python3 -tt -OO', u'/usr/bin/python3')
def test_python_via_env(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/env python'}, templar) == \
(u'#!/usr/bin/env python', u'/usr/bin/env python')
|
gpl-3.0
|
dgzurita/odoo
|
addons/website_mail/models/mail_thread.py
|
338
|
1454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
# TODO for trunk, remove me
class MailThread(osv.AbstractModel):
_inherit = 'mail.thread'
_columns = {
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Messages',
help="Website communication history",
),
}
|
agpl-3.0
|
kogone/android_kernel_oneplus_msm8974
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
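# Illustrative workflow sketch (an assumption, not part of the original
# script): record the scheduler events this script handles, then replay the
# trace through it to open the migration GUI:
#   perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#       -e sched:sched_wakeup -e sched:sched_wakeup_new -- sleep 10
#   perf script -s tools/perf/scripts/python/sched-migration.py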
|
gpl-2.0
|
aethaniel/project_generator
|
project_generator/settings.py
|
1
|
2243
|
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Settings needed (environment variables):
UV4
IARBUILD
PROJECT_GENERATOR_ROOT
ARM_GCC_PATH
"""
import os
import yaml
from os.path import join, pardir, sep
from .yaml_parser import YAML_parser, _finditem
class ProjectSettings:
PROJECT_ROOT = os.environ.get('PROJECT_GENERATOR_ROOT') or join(pardir, pardir)
DEFAULT_TOOL = os.environ.get('PROJECT_GENERATOR_DEFAULT_TOOL') or 'uvision'
def __init__(self):
""" This are default enviroment settings for build tools. To override,
define them in the projects.yaml file. """
self.paths = {}
self.paths['uvision'] = os.environ.get('UV4') or join('C:', sep,
'Keil', 'UV4', 'UV4.exe')
self.paths['iar'] = os.environ.get('IARBUILD') or join(
'C:', sep, 'Program Files (x86)',
'IAR Systems', 'Embedded Workbench 7.0',
'common', 'bin', 'IarBuild.exe')
self.paths['gcc'] = os.environ.get('ARM_GCC_PATH') or ''
def load_env_settings(self, config_file):
""" Loads predefined settings if any, otherwise default used. """
settings = 0
try:
for k, v in config_file.items():
if k == 'settings':
settings = v
except KeyError:
pass
if settings:
uvision = _finditem(settings, 'uvision')
if uvision:
self.paths['uvision'] = uvision
iar = _finditem(settings, 'iar')
if iar:
self.paths['iar'] = iar
gcc = _finditem(settings, 'gcc')
if gcc:
self.paths['gcc'] = gcc
def get_env_settings(self, env_set):
return self.paths[env_set]
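# Illustrative usage sketch (not part of the original module): override the
# default tool paths from a parsed projects.yaml dict; the file name and the
# exact YAML layout are assumptions.
if __name__ == '__main__':
    settings = ProjectSettings()
    with open('projects.yaml', 'r') as config_file:
        config = yaml.load(config_file)
    settings.load_env_settings(config)
    print(settings.get_env_settings('uvision'))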
|
apache-2.0
|
apyrgio/synnefo
|
snf-astakos-app/astakos/api/urls.py
|
9
|
2341
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url, include
from snf_django.lib.api import api_endpoint_not_found
astakos_account_v1_0 = patterns(
'astakos.api.quotas',
url(r'^quotas/?$', 'quotas', name="astakos-api-quotas"),
url(r'^service_quotas/?$', 'service_quotas'),
url(r'^service_project_quotas/?$', 'service_project_quotas'),
url(r'^resources/?$', 'resources'),
url(r'^commissions/?$', 'commissions'),
url(r'^commissions/action/?$', 'resolve_pending_commissions'),
url(r'^commissions/(?P<serial>\d+)/?$', 'get_commission'),
url(r'^commissions/(?P<serial>\d+)/action/?$', 'serial_action'),
)
astakos_account_v1_0 += patterns(
'astakos.api.user',
url(r'^feedback/?$', 'send_feedback'),
url(r'^user_catalogs/?$', 'get_uuid_displayname_catalogs'),
)
astakos_account_v1_0 += patterns(
'astakos.api.service',
url(r'^service/user_catalogs/?$', 'get_uuid_displayname_catalogs'),
)
astakos_account_v1_0 += patterns(
'astakos.api.projects',
url(r'^projects/?$', 'projects', name='api_projects'),
url(r'^projects/memberships/?$', 'memberships', name='api_memberships'),
url(r'^projects/memberships/(?P<memb_id>\d+)/?$', 'membership',
name='api_membership'),
url(r'^projects/memberships/(?P<memb_id>\d+)/action/?$',
'membership_action', name='api_membership_action'),
url(r'^projects/(?P<project_id>[^/]+)/?$', 'project', name='api_project'),
url(r'^projects/(?P<project_id>[^/]+)/action/?$', 'project_action',
name='api_project_action'),
)
urlpatterns = patterns(
'',
url(r'^v1.0/', include(astakos_account_v1_0)),
(r'^.*', api_endpoint_not_found),
)
|
gpl-3.0
|
GenericStudent/home-assistant
|
tests/components/fritzbox/test_binary_sensor.py
|
13
|
2896
|
"""Tests for AVM Fritz!Box binary sensor component."""
from datetime import timedelta
from unittest import mock
from requests.exceptions import HTTPError
from homeassistant.components.binary_sensor import DOMAIN
from homeassistant.components.fritzbox.const import DOMAIN as FB_DOMAIN
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from . import MOCK_CONFIG, FritzDeviceBinarySensorMock
from tests.async_mock import Mock
from tests.common import async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.fake_name"
async def setup_fritzbox(hass: HomeAssistantType, config: dict):
"""Set up mock AVM Fritz!Box."""
assert await async_setup_component(hass, FB_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup(hass: HomeAssistantType, fritz: Mock):
"""Test setup of platform."""
device = FritzDeviceBinarySensorMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.state == STATE_ON
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake_name"
assert state.attributes[ATTR_DEVICE_CLASS] == "window"
async def test_is_off(hass: HomeAssistantType, fritz: Mock):
"""Test state of platform."""
device = FritzDeviceBinarySensorMock()
device.present = False
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.state == STATE_OFF
async def test_update(hass: HomeAssistantType, fritz: Mock):
"""Test update with error."""
device = FritzDeviceBinarySensorMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert device.update.call_count == 1
assert fritz().login.call_count == 1
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert device.update.call_count == 2
assert fritz().login.call_count == 1
async def test_update_error(hass: HomeAssistantType, fritz: Mock):
"""Test update with error."""
device = FritzDeviceBinarySensorMock()
device.update.side_effect = [mock.DEFAULT, HTTPError("Boom")]
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert device.update.call_count == 1
assert fritz().login.call_count == 1
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert device.update.call_count == 2
assert fritz().login.call_count == 2
|
apache-2.0
|
jlegendary/servo
|
python/mozlog/mozlog/structured/commandline.py
|
39
|
8909
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import optparse
from collections import defaultdict
from structuredlog import StructuredLogger, set_default_logger
import handlers
import formatters
log_formatters = {
'raw': (formatters.JSONFormatter, "Raw structured log messages"),
'unittest': (formatters.UnittestFormatter, "Unittest style output"),
'xunit': (formatters.XUnitFormatter, "xUnit compatible XML"),
'html': (formatters.HTMLFormatter, "HTML report"),
'mach': (formatters.MachFormatter, "Human-readable output"),
'tbpl': (formatters.TbplFormatter, "TBPL style log format"),
}
TEXT_FORMATTERS = ('raw', 'mach')
"""a subset of formatters for non test harnesses related applications"""
def level_filter_wrapper(formatter, level):
return handlers.LogLevelFilter(formatter, level)
def verbose_wrapper(formatter, verbose):
formatter.verbose = verbose
return formatter
def buffer_handler_wrapper(handler, buffer_limit):
if buffer_limit == "UNLIMITED":
buffer_limit = None
else:
buffer_limit = int(buffer_limit)
return handlers.BufferingLogFilter(handler, buffer_limit)
formatter_option_defaults = {
'verbose': False,
'level': 'info',
}
fmt_options = {
# <option name>: (<wrapper function>, description, <applicable formatters>, action)
# "action" is used by the commandline parser in use.
'verbose': (verbose_wrapper,
"Enables verbose mode for the given formatter.",
["mach"], "store_true"),
'level': (level_filter_wrapper,
"A least log level to subscribe to for the given formatter (debug, info, error, etc.)",
["mach", "tbpl"], "store"),
'buffer': (buffer_handler_wrapper,
"If specified, enables message buffering at the given buffer size limit.",
["mach", "tbpl"], "store"),
}
def log_file(name):
if name == "-":
return sys.stdout
# ensure we have a correct dirpath by using realpath
dirpath = os.path.dirname(os.path.realpath(name))
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return open(name, "w")
def add_logging_group(parser, include_formatters=None):
"""
Add logging options to an argparse ArgumentParser or
optparse OptionParser.
Each formatter has a corresponding option of the form --log-{name}
where {name} is the name of the formatter. The option takes a value
which is either a filename or "-" to indicate stdout.
:param parser: The ArgumentParser or OptionParser object that should have
logging options added.
:param include_formatters: List of formatter names that should be included
in the option group. Defaults to None, meaning
all the formatters are included. A common use
of this option is to specify
:data:`TEXT_FORMATTERS` to include only the
most useful formatters for a command line tool
that is not related to test harnesses.
"""
group_name = "Output Logging"
group_description = ("Each option represents a possible logging format "
"and takes a filename to write that format to, "
"or '-' to write to stdout.")
if include_formatters is None:
include_formatters = log_formatters.keys()
if isinstance(parser, optparse.OptionParser):
group = optparse.OptionGroup(parser,
group_name,
group_description)
parser.add_option_group(group)
opt_log_type = 'str'
group_add = group.add_option
else:
group = parser.add_argument_group(group_name,
group_description)
opt_log_type = log_file
group_add = group.add_argument
for name, (cls, help_str) in log_formatters.iteritems():
if name in include_formatters:
group_add("--log-" + name, action="append", type=opt_log_type,
help=help_str)
for optname, (cls, help_str, formatters, action) in fmt_options.iteritems():
for fmt in formatters:
# make sure fmt is in log_formatters and is accepted
if fmt in log_formatters and fmt in include_formatters:
group_add("--log-%s-%s" % (fmt, optname), action=action,
help=help_str, default=None)
def setup_handlers(logger, formatters, formatter_options):
"""
Add handlers to the given logger according to the formatters and
options provided.
:param logger: The logger configured by this function.
:param formatters: A dict of {formatter: [streams]} to use in handlers.
:param formatter_options: a dict of {formatter: {option: value}} to
use when configuring formatters.
"""
unused_options = set(formatter_options.keys()) - set(formatters.keys())
if unused_options:
msg = ("Options specified for unused formatter(s) (%s) have no effect" %
list(unused_options))
raise ValueError(msg)
for fmt, streams in formatters.iteritems():
formatter_cls = log_formatters[fmt][0]
formatter = formatter_cls()
handler_wrapper, handler_option = None, ""
for option, value in formatter_options[fmt].iteritems():
if option == "buffer":
handler_wrapper, handler_option = fmt_options[option][0], value
else:
formatter = fmt_options[option][0](formatter, value)
for value in streams:
handler = handlers.StreamHandler(stream=value, formatter=formatter)
if handler_wrapper:
handler = handler_wrapper(handler, handler_option)
logger.add_handler(handler)
def setup_logging(suite, args, defaults=None):
"""
Configure a structuredlogger based on command line arguments.
The created structuredlogger will also be set as the default logger, and
can be retrieved with :py:func:`~mozlog.structured.structuredlog.get_default_logger`.
:param suite: The name of the testsuite being run
:param args: A dictionary of {argument_name:value} produced from
parsing the command line arguments for the application
:param defaults: A dictionary of {formatter name: output stream} to apply
when there is no logging supplied on the command line. If
this isn't supplied, reasonable defaults are chosen
(coloured mach formatting if stdout is a terminal, or raw
logs otherwise).
:rtype: StructuredLogger
"""
logger = StructuredLogger(suite)
# Keep track of any options passed for formatters.
formatter_options = defaultdict(lambda: formatter_option_defaults.copy())
# Keep track of formatters and list of streams specified.
formatters = defaultdict(list)
found = False
found_stdout_logger = False
if not hasattr(args, 'iteritems'):
args = vars(args)
if defaults is None:
if sys.__stdout__.isatty():
defaults = {"mach": sys.stdout}
else:
defaults = {"raw": sys.stdout}
for name, values in args.iteritems():
parts = name.split('_')
if len(parts) > 3:
continue
# Our args will be ['log', <formatter>] or ['log', <formatter>, <option>].
if parts[0] == 'log' and values is not None:
if len(parts) == 1 or parts[1] not in log_formatters:
continue
if len(parts) == 2:
_, formatter = parts
for value in values:
found = True
if isinstance(value, basestring):
value = log_file(value)
if value == sys.stdout:
found_stdout_logger = True
formatters[formatter].append(value)
if len(parts) == 3:
_, formatter, opt = parts
formatter_options[formatter][opt] = values
# If there is no user-specified logging, go with the default options
if not found:
for name, value in defaults.iteritems():
formatters[name].append(value)
elif not found_stdout_logger and sys.stdout in defaults.values():
for name, value in defaults.iteritems():
if value == sys.stdout:
formatters[name].append(value)
setup_handlers(logger, formatters, formatter_options)
set_default_logger(logger)
return logger
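# Illustrative usage sketch (not part of the original module): wiring the
# helpers above into a minimal argparse-based tool; the suite name is made up.
if __name__ == '__main__':
    import argparse
    example_parser = argparse.ArgumentParser()
    add_logging_group(example_parser)
    parsed_args = example_parser.parse_args()
    example_logger = setup_logging("example-suite", parsed_args)
    example_logger.info("structured logging configured")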
|
mpl-2.0
|
daavery/audacity
|
lib-src/lv2/lv2/plugins/eg-fifths.lv2/waflib/extras/autowaf.py
|
176
|
22430
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import glob
import os
import subprocess
import sys
from waflib import Configure,Context,Logs,Node,Options,Task,Utils
from waflib.TaskGen import feature,before,after
global g_is_child
g_is_child=False
global g_step
g_step=0
@feature('c','cxx')
@after('apply_incpaths')
def include_config_h(self):
self.env.append_value('INCPATHS',self.bld.bldnode.abspath())
def set_options(opt,debug_by_default=False):
global g_step
if g_step>0:
return
dirs_options=opt.add_option_group('Installation directories','')
for k in('--prefix','--destdir'):
option=opt.parser.get_option(k)
if option:
opt.parser.remove_option(k)
dirs_options.add_option(option)
dirs_options.add_option('--bindir',type='string',help="Executable programs [Default: PREFIX/bin]")
dirs_options.add_option('--configdir',type='string',help="Configuration data [Default: PREFIX/etc]")
dirs_options.add_option('--datadir',type='string',help="Shared data [Default: PREFIX/share]")
dirs_options.add_option('--includedir',type='string',help="Header files [Default: PREFIX/include]")
dirs_options.add_option('--libdir',type='string',help="Libraries [Default: PREFIX/lib]")
dirs_options.add_option('--mandir',type='string',help="Manual pages [Default: DATADIR/man]")
dirs_options.add_option('--docdir',type='string',help="HTML documentation [Default: DATADIR/doc]")
if debug_by_default:
opt.add_option('--optimize',action='store_false',default=True,dest='debug',help="Build optimized binaries")
else:
opt.add_option('--debug',action='store_true',default=False,dest='debug',help="Build debuggable binaries")
opt.add_option('--pardebug',action='store_true',default=False,dest='pardebug',help="Build parallel-installable debuggable libraries with D suffix")
opt.add_option('--grind',action='store_true',default=False,dest='grind',help="Run tests in valgrind")
opt.add_option('--strict',action='store_true',default=False,dest='strict',help="Use strict compiler flags and show all warnings")
opt.add_option('--ultra-strict',action='store_true',default=False,dest='ultra_strict',help="Use even stricter compiler flags (likely to trigger many warnings in library headers)")
opt.add_option('--docs',action='store_true',default=False,dest='docs',help="Build documentation - requires doxygen")
opt.add_option('--lv2-user',action='store_true',default=False,dest='lv2_user',help="Install LV2 bundles to user location")
opt.add_option('--lv2-system',action='store_true',default=False,dest='lv2_system',help="Install LV2 bundles to system location")
dirs_options.add_option('--lv2dir',type='string',help="LV2 bundles [Default: LIBDIR/lv2]")
g_step=1
def check_header(conf,lang,name,define='',mandatory=True):
includes=''
if sys.platform=="darwin":
includes='/opt/local/include'
if lang=='c':
check_func=conf.check_cc
elif lang=='cxx':
check_func=conf.check_cxx
else:
Logs.error("Unknown header language `%s'"%lang)
return
if define!='':
check_func(header_name=name,includes=includes,define_name=define,mandatory=mandatory)
else:
check_func(header_name=name,includes=includes,mandatory=mandatory)
def nameify(name):
return name.replace('/','_').replace('++','PP').replace('-','_').replace('.','_')
def define(conf,var_name,value):
conf.define(var_name,value)
conf.env[var_name]=value
def check_pkg(conf,name,**args):
if args['uselib_store'].lower()in conf.env['AUTOWAF_LOCAL_LIBS']:
return
class CheckType:
OPTIONAL=1
MANDATORY=2
var_name='CHECKED_'+nameify(args['uselib_store'])
check=not var_name in conf.env
mandatory=not'mandatory'in args or args['mandatory']
if not check and'atleast_version'in args:
checked_version=conf.env['VERSION_'+name]
if checked_version and checked_version<args['atleast_version']:
check=True;
if not check and mandatory and conf.env[var_name]==CheckType.OPTIONAL:
check=True;
if check:
found=None
pkg_var_name='PKG_'+name.replace('-','_')
pkg_name=name
if conf.env.PARDEBUG:
args['mandatory']=False
found=conf.check_cfg(package=pkg_name+'D',args="--cflags --libs",**args)
if found:
pkg_name+='D'
if mandatory:
args['mandatory']=True
if not found:
found=conf.check_cfg(package=pkg_name,args="--cflags --libs",**args)
if found:
conf.env[pkg_var_name]=pkg_name
if'atleast_version'in args:
conf.env['VERSION_'+name]=args['atleast_version']
if mandatory:
conf.env[var_name]=CheckType.MANDATORY
else:
conf.env[var_name]=CheckType.OPTIONAL
def normpath(path):
if sys.platform=='win32':
return os.path.normpath(path).replace('\\','/')
else:
return os.path.normpath(path)
def configure(conf):
global g_step
if g_step>1:
return
def append_cxx_flags(flags):
conf.env.append_value('CFLAGS',flags)
conf.env.append_value('CXXFLAGS',flags)
print('')
display_header('Global Configuration')
if Options.options.docs:
conf.load('doxygen')
conf.env['DOCS']=Options.options.docs
conf.env['DEBUG']=Options.options.debug or Options.options.pardebug
conf.env['PARDEBUG']=Options.options.pardebug
conf.env['PREFIX']=normpath(os.path.abspath(os.path.expanduser(conf.env['PREFIX'])))
def config_dir(var,opt,default):
if opt:
conf.env[var]=normpath(opt)
else:
conf.env[var]=normpath(default)
opts=Options.options
prefix=conf.env['PREFIX']
config_dir('BINDIR',opts.bindir,os.path.join(prefix,'bin'))
config_dir('SYSCONFDIR',opts.configdir,os.path.join(prefix,'etc'))
config_dir('DATADIR',opts.datadir,os.path.join(prefix,'share'))
config_dir('INCLUDEDIR',opts.includedir,os.path.join(prefix,'include'))
config_dir('LIBDIR',opts.libdir,os.path.join(prefix,'lib'))
config_dir('MANDIR',opts.mandir,os.path.join(conf.env['DATADIR'],'man'))
config_dir('DOCDIR',opts.docdir,os.path.join(conf.env['DATADIR'],'doc'))
if Options.options.lv2dir:
conf.env['LV2DIR']=Options.options.lv2dir
elif Options.options.lv2_user:
if sys.platform=="darwin":
conf.env['LV2DIR']=os.path.join(os.getenv('HOME'),'Library/Audio/Plug-Ins/LV2')
elif sys.platform=="win32":
conf.env['LV2DIR']=os.path.join(os.getenv('APPDATA'),'LV2')
else:
conf.env['LV2DIR']=os.path.join(os.getenv('HOME'),'.lv2')
elif Options.options.lv2_system:
if sys.platform=="darwin":
conf.env['LV2DIR']='/Library/Audio/Plug-Ins/LV2'
elif sys.platform=="win32":
conf.env['LV2DIR']=os.path.join(os.getenv('COMMONPROGRAMFILES'),'LV2')
else:
conf.env['LV2DIR']=os.path.join(conf.env['LIBDIR'],'lv2')
else:
conf.env['LV2DIR']=os.path.join(conf.env['LIBDIR'],'lv2')
conf.env['LV2DIR']=normpath(conf.env['LV2DIR'])
if Options.options.docs:
doxygen=conf.find_program('doxygen')
if not doxygen:
conf.fatal("Doxygen is required to build with --docs")
dot=conf.find_program('dot')
if not dot:
conf.fatal("Graphviz (dot) is required to build with --docs")
if Options.options.debug:
if conf.env['MSVC_COMPILER']:
conf.env['CFLAGS']=['/Od','/Zi','/MTd']
conf.env['CXXFLAGS']=['/Od','/Zi','/MTd']
conf.env['LINKFLAGS']=['/DEBUG']
else:
conf.env['CFLAGS']=['-O0','-g']
conf.env['CXXFLAGS']=['-O0','-g']
else:
if conf.env['MSVC_COMPILER']:
conf.env['CFLAGS']=['/MD']
conf.env['CXXFLAGS']=['/MD']
append_cxx_flags(['-DNDEBUG'])
if Options.options.ultra_strict:
Options.options.strict=True
conf.env.append_value('CFLAGS',['-Wredundant-decls','-Wstrict-prototypes','-Wmissing-prototypes','-Wcast-qual'])
conf.env.append_value('CXXFLAGS',['-Wcast-qual'])
if Options.options.strict:
conf.env.append_value('CFLAGS',['-pedantic','-Wshadow'])
conf.env.append_value('CXXFLAGS',['-ansi','-Wnon-virtual-dtor','-Woverloaded-virtual'])
append_cxx_flags(['-Wall','-Wcast-align','-Wextra','-Wmissing-declarations','-Wno-unused-parameter','-Wstrict-overflow','-Wundef','-Wwrite-strings','-fstrict-overflow'])
if not conf.check_cc(fragment='''
#ifndef __clang__
#error
#endif
int main() { return 0; }''',features='c',mandatory=False,execute=False,msg='Checking for clang'):
append_cxx_flags(['-Wlogical-op','-Wsuggest-attribute=noreturn','-Wunsafe-loop-optimizations'])
if not conf.env['MSVC_COMPILER']:
append_cxx_flags(['-fshow-column'])
conf.env.prepend_value('CFLAGS','-I'+os.path.abspath('.'))
conf.env.prepend_value('CXXFLAGS','-I'+os.path.abspath('.'))
display_msg(conf,"Install prefix",conf.env['PREFIX'])
display_msg(conf,"Debuggable build",str(conf.env['DEBUG']))
display_msg(conf,"Build documentation",str(conf.env['DOCS']))
print('')
g_step=2
def set_c99_mode(conf):
if conf.env.MSVC_COMPILER:
conf.env.append_unique('CFLAGS',['-TP'])
else:
conf.env.append_unique('CFLAGS',['-std=c99'])
def set_local_lib(conf,name,has_objects):
var_name='HAVE_'+nameify(name.upper())
define(conf,var_name,1)
if has_objects:
if type(conf.env['AUTOWAF_LOCAL_LIBS'])!=dict:
conf.env['AUTOWAF_LOCAL_LIBS']={}
conf.env['AUTOWAF_LOCAL_LIBS'][name.lower()]=True
else:
if type(conf.env['AUTOWAF_LOCAL_HEADERS'])!=dict:
conf.env['AUTOWAF_LOCAL_HEADERS']={}
conf.env['AUTOWAF_LOCAL_HEADERS'][name.lower()]=True
def append_property(obj,key,val):
if hasattr(obj,key):
setattr(obj,key,getattr(obj,key)+val)
else:
setattr(obj,key,val)
def use_lib(bld,obj,libs):
abssrcdir=os.path.abspath('.')
libs_list=libs.split()
for l in libs_list:
in_headers=l.lower()in bld.env['AUTOWAF_LOCAL_HEADERS']
in_libs=l.lower()in bld.env['AUTOWAF_LOCAL_LIBS']
if in_libs:
append_property(obj,'use',' lib%s '%l.lower())
append_property(obj,'framework',bld.env['FRAMEWORK_'+l])
if in_headers or in_libs:
inc_flag='-iquote '+os.path.join(abssrcdir,l.lower())
for f in['CFLAGS','CXXFLAGS']:
if not inc_flag in bld.env[f]:
bld.env.prepend_value(f,inc_flag)
else:
append_property(obj,'uselib',' '+l)
@feature('c','cxx')
@before('apply_link')
def version_lib(self):
if sys.platform=='win32':
self.vnum=None
if self.env['PARDEBUG']:
applicable=['cshlib','cxxshlib','cstlib','cxxstlib']
if[x for x in applicable if x in self.features]:
self.target=self.target+'D'
def set_lib_env(conf,name,version):
'Set up environment for local library as if found via pkg-config.'
NAME=name.upper()
major_ver=version.split('.')[0]
pkg_var_name='PKG_'+name.replace('-','_')+'_'+major_ver
lib_name='%s-%s'%(name,major_ver)
if conf.env.PARDEBUG:
lib_name+='D'
conf.env[pkg_var_name]=lib_name
conf.env['INCLUDES_'+NAME]=['${INCLUDEDIR}/%s-%s'%(name,major_ver)]
conf.env['LIBPATH_'+NAME]=[conf.env.LIBDIR]
conf.env['LIB_'+NAME]=[lib_name]
def display_header(title):
Logs.pprint('BOLD',title)
def display_msg(conf,msg,status=None,color=None):
color='CYAN'
if type(status)==bool and status or status=="True":
color='GREEN'
elif type(status)==bool and not status or status=="False":
color='YELLOW'
Logs.pprint('BOLD'," *",sep='')
Logs.pprint('NORMAL',"%s"%msg.ljust(conf.line_just-3),sep='')
Logs.pprint('BOLD',":",sep='')
Logs.pprint(color,status)
def link_flags(env,lib):
return' '.join(map(lambda x:env['LIB_ST']%x,env['LIB_'+lib]))
def compile_flags(env,lib):
return' '.join(map(lambda x:env['CPPPATH_ST']%x,env['INCLUDES_'+lib]))
def set_recursive():
global g_is_child
g_is_child=True
def is_child():
global g_is_child
return g_is_child
def build_pc(bld,name,version,version_suffix,libs,subst_dict={}):
'''Build a pkg-config file for a library.
name -- uppercase variable name (e.g. 'SOMENAME')
version -- version string (e.g. '1.2.3')
version_suffix -- name version suffix (e.g. '2')
libs -- string/list of dependencies (e.g. 'LIBFOO GLIB')
'''
pkg_prefix=bld.env['PREFIX']
if pkg_prefix[-1]=='/':
pkg_prefix=pkg_prefix[:-1]
target=name.lower()
if version_suffix!='':
target+='-'+version_suffix
if bld.env['PARDEBUG']:
target+='D'
target+='.pc'
libdir=bld.env['LIBDIR']
if libdir.startswith(pkg_prefix):
libdir=libdir.replace(pkg_prefix,'${exec_prefix}')
includedir=bld.env['INCLUDEDIR']
if includedir.startswith(pkg_prefix):
includedir=includedir.replace(pkg_prefix,'${prefix}')
obj=bld(features='subst',source='%s.pc.in'%name.lower(),target=target,install_path=os.path.join(bld.env['LIBDIR'],'pkgconfig'),exec_prefix='${prefix}',PREFIX=pkg_prefix,EXEC_PREFIX='${prefix}',LIBDIR=libdir,INCLUDEDIR=includedir)
if type(libs)!=list:
libs=libs.split()
subst_dict[name+'_VERSION']=version
subst_dict[name+'_MAJOR_VERSION']=version[0:version.find('.')]
for i in libs:
subst_dict[i+'_LIBS']=link_flags(bld.env,i)
lib_cflags=compile_flags(bld.env,i)
if lib_cflags=='':
lib_cflags=' '
subst_dict[i+'_CFLAGS']=lib_cflags
obj.__dict__.update(subst_dict)
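# Hedged usage sketch (not part of the original autowaf.py): a typical wscript
# build() function would call build_pc() for a library target, assuming a
# matching '<name>.pc.in' template exists in the source tree and the module is
# imported as autowaf. 'MYLIB' and 'SOMEDEP' below are illustrative names only.
#
#   def build(bld):
#       autowaf.build_pc(bld, 'MYLIB', '1.2.3', '2', 'SOMEDEP',
#                        {'MYLIB_EXTRA_FLAGS': '-pthread'})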
def build_dir(name,subdir):
if is_child():
return os.path.join('build',name,subdir)
else:
return os.path.join('build',subdir)
def make_simple_dox(name):
name=name.lower()
NAME=name.upper()
try:
top=os.getcwd()
os.chdir(build_dir(name,'doc/html'))
page='group__%s.html'%name
if not os.path.exists(page):
return
for i in[['%s_API '%NAME,''],['%s_DEPRECATED '%NAME,''],['group__%s.html'%name,''],[' ',''],['<script.*><\/script>',''],['<hr\/><a name="details" id="details"><\/a><h2>.*<\/h2>',''],['<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text\/css\"\/>',''],['<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"\/>','Doxygen']]:
os.system("sed -i 's/%s/%s/g' %s"%(i[0],i[1],page))
os.rename('group__%s.html'%name,'index.html')
for i in(glob.glob('*.png')+glob.glob('*.html')+glob.glob('*.js')+glob.glob('*.css')):
if i!='index.html'and i!='style.css':
os.remove(i)
os.chdir(top)
os.chdir(build_dir(name,'doc/man/man3'))
for i in glob.glob('*.3'):
os.system("sed -i 's/%s_API //' %s"%(NAME,i))
for i in glob.glob('_*'):
os.remove(i)
os.chdir(top)
	except Exception as e:
Logs.error("Failed to fix up %s documentation: %s"%(name,e))
def build_dox(bld,name,version,srcdir,blddir,outdir='',versioned=True):
if not bld.env['DOCS']:
return
if is_child():
src_dir=os.path.join(srcdir,name.lower())
doc_dir=os.path.join(blddir,name.lower(),'doc')
else:
src_dir=srcdir
doc_dir=os.path.join(blddir,'doc')
subst_tg=bld(features='subst',source='doc/reference.doxygen.in',target='doc/reference.doxygen',install_path='',name='doxyfile')
subst_dict={name+'_VERSION':version,name+'_SRCDIR':os.path.abspath(src_dir),name+'_DOC_DIR':os.path.abspath(doc_dir)}
subst_tg.__dict__.update(subst_dict)
subst_tg.post()
docs=bld(features='doxygen',doxyfile='doc/reference.doxygen')
docs.post()
outname=name.lower()
if versioned:
outname+='-%d'%int(version[0:version.find('.')])
bld.install_files(os.path.join('${DOCDIR}',outname,outdir,'html'),bld.path.get_bld().ant_glob('doc/html/*'))
for i in range(1,8):
bld.install_files('${MANDIR}/man%d'%i,bld.path.get_bld().ant_glob('doc/man/man%d/*'%i,excl='**/_*'))
def build_version_files(header_path,source_path,domain,major,minor,micro):
header_path=os.path.abspath(header_path)
source_path=os.path.abspath(source_path)
text="int "+domain+"_major_version = "+str(major)+";\n"
text+="int "+domain+"_minor_version = "+str(minor)+";\n"
text+="int "+domain+"_micro_version = "+str(micro)+";\n"
try:
o=open(source_path,'w')
o.write(text)
o.close()
except IOError:
Logs.error('Failed to open %s for writing\n'%source_path)
sys.exit(-1)
text="#ifndef __"+domain+"_version_h__\n"
text+="#define __"+domain+"_version_h__\n"
text+="extern const char* "+domain+"_revision;\n"
text+="extern int "+domain+"_major_version;\n"
text+="extern int "+domain+"_minor_version;\n"
text+="extern int "+domain+"_micro_version;\n"
text+="#endif /* __"+domain+"_version_h__ */\n"
try:
o=open(header_path,'w')
o.write(text)
o.close()
except IOError:
Logs.warn('Failed to open %s for writing\n'%header_path)
sys.exit(-1)
return None
def build_i18n_pot(bld,srcdir,dir,name,sources,copyright_holder=None):
Logs.info('Generating pot file from %s'%name)
pot_file='%s.pot'%name
cmd=['xgettext','--keyword=_','--keyword=N_','--keyword=S_','--from-code=UTF-8','-o',pot_file]
if copyright_holder:
cmd+=['--copyright-holder="%s"'%copyright_holder]
cmd+=sources
Logs.info('Updating '+pot_file)
subprocess.call(cmd,cwd=os.path.join(srcdir,dir))
def build_i18n_po(bld,srcdir,dir,name,sources,copyright_holder=None):
pwd=os.getcwd()
os.chdir(os.path.join(srcdir,dir))
pot_file='%s.pot'%name
po_files=glob.glob('po/*.po')
for po_file in po_files:
cmd=['msgmerge','--update',po_file,pot_file]
Logs.info('Updating '+po_file)
subprocess.call(cmd)
os.chdir(pwd)
def build_i18n_mo(bld,srcdir,dir,name,sources,copyright_holder=None):
pwd=os.getcwd()
os.chdir(os.path.join(srcdir,dir))
pot_file='%s.pot'%name
po_files=glob.glob('po/*.po')
for po_file in po_files:
mo_file=po_file.replace('.po','.mo')
cmd=['msgfmt','-c','-f','-o',mo_file,po_file]
Logs.info('Generating '+po_file)
subprocess.call(cmd)
os.chdir(pwd)
def build_i18n(bld,srcdir,dir,name,sources,copyright_holder=None):
build_i18n_pot(bld,srcdir,dir,name,sources,copyright_holder)
build_i18n_po(bld,srcdir,dir,name,sources,copyright_holder)
build_i18n_mo(bld,srcdir,dir,name,sources,copyright_holder)
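# Hedged usage sketch (not part of the original autowaf.py): a project would
# typically regenerate its translations with a call such as the one below,
# where 'myapp' is the subdirectory (relative to srcdir) containing the po/
# folder, and APPNAME/sources come from the calling wscript (illustrative
# names only).
#
#   autowaf.build_i18n(bld, srcdir, 'myapp', APPNAME, sources,
#                      copyright_holder='Example Author')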
def cd_to_build_dir(ctx,appname):
orig_dir=os.path.abspath(os.curdir)
top_level=(len(ctx.stack_path)>1)
if top_level:
os.chdir(os.path.join('build',appname))
else:
os.chdir('build')
Logs.pprint('GREEN',"Waf: Entering directory `%s'"%os.path.abspath(os.getcwd()))
def cd_to_orig_dir(ctx,child):
if child:
os.chdir(os.path.join('..','..'))
else:
os.chdir('..')
def pre_test(ctx,appname,dirs=['src']):
diropts=''
for i in dirs:
diropts+=' -d '+i
cd_to_build_dir(ctx,appname)
clear_log=open('lcov-clear.log','w')
try:
try:
subprocess.call(('lcov %s -z'%diropts).split(),stdout=clear_log,stderr=clear_log)
except:
Logs.warn('Failed to run lcov, no coverage report will be generated')
finally:
clear_log.close()
def post_test(ctx,appname,dirs=['src'],remove=['*boost*','c++*']):
diropts=''
for i in dirs:
diropts+=' -d '+i
coverage_log=open('lcov-coverage.log','w')
coverage_lcov=open('coverage.lcov','w')
coverage_stripped_lcov=open('coverage-stripped.lcov','w')
try:
try:
base='.'
if g_is_child:
base='..'
subprocess.call(('lcov -c %s -b %s'%(diropts,base)).split(),stdout=coverage_lcov,stderr=coverage_log)
subprocess.call(['lcov','--remove','coverage.lcov']+remove,stdout=coverage_stripped_lcov,stderr=coverage_log)
if not os.path.isdir('coverage'):
os.makedirs('coverage')
subprocess.call('genhtml -o coverage coverage-stripped.lcov'.split(),stdout=coverage_log,stderr=coverage_log)
except:
Logs.warn('Failed to run lcov, no coverage report will be generated')
finally:
coverage_stripped_lcov.close()
coverage_lcov.close()
coverage_log.close()
print('')
Logs.pprint('GREEN',"Waf: Leaving directory `%s'"%os.path.abspath(os.getcwd()))
top_level=(len(ctx.stack_path)>1)
if top_level:
cd_to_orig_dir(ctx,top_level)
print('')
Logs.pprint('BOLD','Coverage:',sep='')
print('<file://%s>\n\n'%os.path.abspath('coverage/index.html'))
def run_test(ctx,appname,test,desired_status=0,dirs=['src'],name='',header=False):
s=test
if type(test)==type([]):
		s=' '.join(test)
if header:
Logs.pprint('BOLD','** Test',sep='')
Logs.pprint('NORMAL','%s'%s)
cmd=test
if Options.options.grind:
cmd='valgrind '+test
if subprocess.call(cmd,shell=True)==desired_status:
Logs.pprint('GREEN','** Pass %s'%name)
return True
else:
Logs.pprint('RED','** FAIL %s'%name)
return False
def run_tests(ctx,appname,tests,desired_status=0,dirs=['src'],name='*',headers=False):
failures=0
diropts=''
for i in dirs:
diropts+=' -d '+i
for i in tests:
if not run_test(ctx,appname,i,desired_status,dirs,i,headers):
failures+=1
print('')
if failures==0:
Logs.pprint('GREEN','** Pass: All %s.%s tests passed'%(appname,name))
else:
Logs.pprint('RED','** FAIL: %d %s.%s tests failed'%(failures,appname,name))
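# Hedged usage sketch (not part of the original autowaf.py): a wscript test()
# command usually wraps these helpers roughly as follows, assuming the module
# is imported as autowaf; 'test_foo' and 'test_bar' stand in for real test
# binaries built by the project.
#
#   def test(ctx):
#       autowaf.pre_test(ctx, APPNAME)
#       autowaf.run_tests(ctx, APPNAME, ['test_foo', 'test_bar'], dirs=['src'])
#       autowaf.post_test(ctx, APPNAME)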
def run_ldconfig(ctx):
if(ctx.cmd=='install'and not ctx.env['RAN_LDCONFIG']and ctx.env['LIBDIR']and not'DESTDIR'in os.environ and not Options.options.destdir):
try:
Logs.info("Waf: Running `/sbin/ldconfig %s'"%ctx.env['LIBDIR'])
subprocess.call(['/sbin/ldconfig',ctx.env['LIBDIR']])
ctx.env['RAN_LDCONFIG']=True
except:
pass
def write_news(name,in_files,out_file,top_entries=None,extra_entries=None):
import rdflib
import textwrap
from time import strftime,strptime
doap=rdflib.Namespace('http://usefulinc.com/ns/doap#')
dcs=rdflib.Namespace('http://ontologi.es/doap-changeset#')
rdfs=rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#')
foaf=rdflib.Namespace('http://xmlns.com/foaf/0.1/')
rdf=rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
m=rdflib.ConjunctiveGraph()
try:
for i in in_files:
m.parse(i,format='n3')
except:
Logs.warn('Error parsing data, unable to generate NEWS')
return
proj=m.value(None,rdf.type,doap.Project)
for f in m.triples([proj,rdfs.seeAlso,None]):
if f[2].endswith('.ttl'):
m.parse(f[2],format='n3')
entries={}
for r in m.triples([proj,doap.release,None]):
release=r[2]
revision=m.value(release,doap.revision,None)
date=m.value(release,doap.created,None)
blamee=m.value(release,dcs.blame,None)
changeset=m.value(release,dcs.changeset,None)
dist=m.value(release,doap['file-release'],None)
if revision and date and blamee and changeset:
entry='%s (%s) stable;\n'%(name,revision)
for i in m.triples([changeset,dcs.item,None]):
item=textwrap.wrap(m.value(i[2],rdfs.label,None),width=79)
entry+='\n * '+'\n '.join(item)
if dist and top_entries is not None:
if not str(dist)in top_entries:
top_entries[str(dist)]=[]
top_entries[str(dist)]+=['%s: %s'%(name,'\n '.join(item))]
if extra_entries:
for i in extra_entries[str(dist)]:
entry+='\n * '+i
entry+='\n\n --'
blamee_name=m.value(blamee,foaf.name,None)
blamee_mbox=m.value(blamee,foaf.mbox,None)
if blamee_name and blamee_mbox:
entry+=' %s <%s>'%(blamee_name,blamee_mbox.replace('mailto:',''))
entry+=' %s\n\n'%(strftime('%a, %d %b %Y %H:%M:%S +0000',strptime(date,'%Y-%m-%d')))
entries[(date,revision)]=entry
else:
Logs.warn('Ignored incomplete %s release description'%name)
if len(entries)>0:
news=open(out_file,'w')
for e in sorted(entries.keys(),reverse=True):
news.write(entries[e])
news.close()
|
gpl-2.0
|
apkbox/nano-rpc
|
third_party/protobuf/src/python/mox.py
|
603
|
38237
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
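# Hedged usage sketch (not part of the original mox.py): stubbing out a plain
# module-level function (so a MockAnything stub is used) and restoring it
# afterwards. os.path.exists is only an example target here.
#
#   m = Mox()
#   m.StubOutWithMock(os.path, 'exists')
#   os.path.exists('/tmp/somefile').AndReturn(True)   # record
#   m.ReplayAll()
#   assert os.path.exists('/tmp/somefile')            # replay
#   m.VerifyAll()
#   m.UnsetStubs()                                    # restore the real function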
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
    full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
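# Hedged illustration (not part of the original mox.py): WithSideEffects()
# returns the MockMethod itself, so it chains with AndReturn/AndRaise, e.g.
#
#   mock_obj.Save(IgnoreArg()).WithSideEffects(capture_arg).AndReturn(True)
#
# where capture_arg is any callable accepting the same arguments as Save.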
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError, 'method must be implemented by a subclass.'
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example mock_dao.SetTimeout((IsAlmost(3.9)))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), true)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
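# Hedged illustration (not part of the original mox.py): two expectations placed
# in the same unordered group replay successfully in either order, e.g.
#
#   mock_dao.Load('a').InAnyOrder().AndReturn(1)
#   mock_dao.Load('b').InAnyOrder().AndReturn(2)
#
# which is useful when the code under test iterates over dict keys.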
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful, if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod();
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
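# Hedged illustration (not part of the original mox.py): a minimal
# record-replay-verify round trip using MockAnything, runnable as a script.
if __name__ == '__main__':
  demo_mox = Mox()
  demo_dao = demo_mox.CreateMockAnything()
  demo_dao.Fetch('key').AndReturn('value')      # record phase
  demo_mox.ReplayAll()                          # switch every mock to replay
  assert demo_dao.Fetch('key') == 'value'       # replay phase
  demo_mox.VerifyAll()                          # all expectations consumed
  print 'mox demo passed'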
|
bsd-3-clause
|
albertomurillo/ansible
|
test/units/modules/packaging/os/test_rhn_register.py
|
31
|
9758
|
import contextlib
import json
import os
from units.compat.mock import mock_open
from ansible.module_utils import basic
from ansible.module_utils._text import to_native
import ansible.module_utils.six
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.modules.packaging.os import rhn_register
import pytest
SYSTEMID = """<?xml version="1.0"?>
<params>
<param>
<value><struct>
<member>
<name>system_id</name>
<value><string>ID-123456789</string></value>
</member>
</struct></value>
</param>
</params>
"""
def skipWhenAllModulesMissing(modules):
    """Skip the decorated test unless at least one of the modules is available."""
for module in modules:
try:
__import__(module)
return False
except ImportError:
continue
return True
orig_import = __import__
@pytest.fixture
def import_libxml(mocker):
def mock_import(name, *args, **kwargs):
if name in ['libxml2', 'libxml']:
raise ImportError()
else:
return orig_import(name, *args, **kwargs)
if ansible.module_utils.six.PY3:
mocker.patch('builtins.__import__', side_effect=mock_import)
else:
mocker.patch('__builtin__.__import__', side_effect=mock_import)
@pytest.fixture
def patch_rhn(mocker):
load_config_return = {
'serverURL': 'https://xmlrpc.rhn.redhat.com/XMLRPC',
'systemIdPath': '/etc/sysconfig/rhn/systemid'
}
mocker.patch.object(rhn_register.Rhn, 'load_config', return_value=load_config_return)
mocker.patch.object(rhn_register, 'HAS_UP2DATE_CLIENT', mocker.PropertyMock(return_value=True))
@pytest.mark.skipif(skipWhenAllModulesMissing(['libxml2', 'libxml']), reason='none are available: libxml2, libxml')
def test_systemid_with_requirements(capfd, mocker, patch_rhn):
"""Check 'msg' and 'changed' results"""
mocker.patch.object(rhn_register.Rhn, 'enable')
mock_isfile = mocker.patch('os.path.isfile', return_value=True)
mocker.patch('ansible.modules.packaging.os.rhn_register.open', mock_open(read_data=SYSTEMID), create=True)
rhn = rhn_register.Rhn()
assert '123456789' == to_native(rhn.systemid)
@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
@pytest.mark.usefixtures('patch_ansible_module')
def test_systemid_requirements_missing(capfd, mocker, patch_rhn, import_libxml):
"""Check that missing dependencies are detected"""
mocker.patch('os.path.isfile', return_value=True)
mocker.patch('ansible.modules.packaging.os.rhn_register.open', mock_open(read_data=SYSTEMID), create=True)
with pytest.raises(SystemExit):
rhn_register.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert 'Missing arguments' in results['msg']
@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
@pytest.mark.usefixtures('patch_ansible_module')
def test_without_required_parameters(capfd, patch_rhn):
    """Failure must occur when all parameters are missing"""
with pytest.raises(SystemExit):
rhn_register.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert 'Missing arguments' in results['msg']
TESTED_MODULE = rhn_register.__name__
TEST_CASES = [
[
# Registering an unregistered host with channels
{
'channels': 'rhel-x86_64-server-6',
'username': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('channel.software.setSystemChannels', [1]),
('auth.logout', [1]),
],
'is_registered': False,
'is_registered.call_count': 1,
'enable.call_count': 1,
'systemid.call_count': 2,
'changed': True,
'msg': "System successfully registered to 'rhn.redhat.com'.",
'run_command.call_count': 1,
'run_command.call_args': '/usr/sbin/rhnreg_ks',
'request_called': True,
'unlink.call_count': 0,
}
],
[
# Registering an unregistered host without channels
{
'activationkey': 'key',
'username': 'user',
'password': 'pass',
},
{
'calls': [
],
'is_registered': False,
'is_registered.call_count': 1,
'enable.call_count': 1,
'systemid.call_count': 0,
'changed': True,
'msg': "System successfully registered to 'rhn.redhat.com'.",
'run_command.call_count': 1,
'run_command.call_args': '/usr/sbin/rhnreg_ks',
'request_called': False,
'unlink.call_count': 0,
}
],
[
        # Register a host that is already registered, check that the result is unchanged
{
'activationkey': 'key',
'username': 'user',
'password': 'pass',
},
{
'calls': [
],
'is_registered': True,
'is_registered.call_count': 1,
'enable.call_count': 0,
'systemid.call_count': 0,
'changed': False,
'msg': 'System already registered.',
'run_command.call_count': 0,
'request_called': False,
'unlink.call_count': 0,
},
],
[
        # Unregister a host, check that the result is changed
{
'activationkey': 'key',
'username': 'user',
'password': 'pass',
'state': 'absent',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.deleteSystems', [1]),
('auth.logout', [1]),
],
'is_registered': True,
'is_registered.call_count': 1,
'enable.call_count': 0,
'systemid.call_count': 1,
'changed': True,
'msg': 'System successfully unregistered from rhn.redhat.com.',
'run_command.call_count': 0,
'request_called': True,
'unlink.call_count': 1,
}
],
[
        # Unregister an already unregistered host (systemid missing locally), check that the result is unchanged
{
'activationkey': 'key',
'username': 'user',
'password': 'pass',
'state': 'absent',
},
{
'calls': [],
'is_registered': False,
'is_registered.call_count': 1,
'enable.call_count': 0,
'systemid.call_count': 0,
'changed': False,
'msg': 'System already unregistered.',
'run_command.call_count': 0,
'request_called': False,
'unlink.call_count': 0,
}
],
[
        # Unregister an unknown host (a host with a systemid available locally), check that the result contains 'failed'
{
'activationkey': 'key',
'username': 'user',
'password': 'pass',
'state': 'absent',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.deleteSystems', xmlrpc_client.Fault(1003, 'The following systems were NOT deleted: 123456789')),
('auth.logout', [1]),
],
'is_registered': True,
'is_registered.call_count': 1,
'enable.call_count': 0,
'systemid.call_count': 1,
'failed': True,
'msg': "Failed to unregister: <Fault 1003: 'The following systems were NOT deleted: 123456789'>",
'run_command.call_count': 0,
'request_called': True,
'unlink.call_count': 0,
}
],
]
@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
@pytest.mark.usefixtures('patch_ansible_module')
def test_register_parameters(mocker, capfd, mock_request, patch_rhn, testcase):
# successful execution, no output
mocker.patch.object(basic.AnsibleModule, 'run_command', return_value=(0, '', ''))
mock_is_registered = mocker.patch.object(rhn_register.Rhn, 'is_registered', mocker.PropertyMock(return_value=testcase['is_registered']))
mocker.patch.object(rhn_register.Rhn, 'enable')
mock_systemid = mocker.patch.object(rhn_register.Rhn, 'systemid', mocker.PropertyMock(return_value=12345))
mocker.patch('os.unlink', return_value=True)
with pytest.raises(SystemExit):
rhn_register.main()
assert basic.AnsibleModule.run_command.call_count == testcase['run_command.call_count']
if basic.AnsibleModule.run_command.call_count:
assert basic.AnsibleModule.run_command.call_args[0][0][0] == testcase['run_command.call_args']
assert mock_is_registered.call_count == testcase['is_registered.call_count']
assert rhn_register.Rhn.enable.call_count == testcase['enable.call_count']
assert mock_systemid.call_count == testcase['systemid.call_count']
assert xmlrpc_client.Transport.request.called == testcase['request_called']
assert os.unlink.call_count == testcase['unlink.call_count']
out, err = capfd.readouterr()
results = json.loads(out)
assert results.get('changed') == testcase.get('changed')
assert results.get('failed') == testcase.get('failed')
assert results['msg'] == testcase['msg']
assert not testcase['calls'] # all calls should have been consumed
|
gpl-3.0
|
Impactstory/sherlockoa
|
put_repo_requests_in_db.py
|
2
|
5955
|
import csv
import os
import json
import gspread
import datetime
import re
import unicodecsv as csv
from app import db
from util import safe_commit
from emailer import send
from emailer import create_email
from endpoint import Endpoint
from repository import Repository
from repo_request import RepoRequest
def get_repo_request_rows():
from oauth2client.service_account import ServiceAccountCredentials
# this file inspired by https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html
# use creds to create a client to interact with the Google Drive API
scopes = ['https://spreadsheets.google.com/feeds']
json_creds = os.getenv("GOOGLE_SHEETS_CREDS_JSON")
creds_dict = json.loads(json_creds)
# hack to get around ugly new line escaping issues
# this works for me, but later found links to what might be cleaner solutions:
# use ast.literal_eval? https://github.com/googleapis/google-api-go-client/issues/185#issuecomment-422732250
# or maybe dumping like this might fix it? https://coreyward.svbtle.com/how-to-send-a-multiline-file-to-heroku-config
creds_dict["private_key"] = creds_dict["private_key"].replace("\\\\n", "\n")
# now continue
creds = ServiceAccountCredentials.from_json_keyfile_dict(creds_dict, scopes)
client = gspread.authorize(creds)
# Find a workbook by url
spreadsheet = client.open_by_url("https://docs.google.com/spreadsheets/d/1RcQuetbKVYRRf0GhGZQi38okY8gT1cPUs6l3RM94yQo/edit#gid=704459328")
sheet = spreadsheet.sheet1
# Extract and print all of the values
rows = sheet.get_all_values()
print(rows[0:1])
return rows
def save_repo_request_rows(rows):
with open('out.csv','wb') as f:
w = csv.DictWriter(f, fieldnames=RepoRequest.list_fieldnames(), encoding='utf-8-sig')
for row in rows[1:]: # skip header row
my_repo_request = RepoRequest()
my_repo_request.set_id_seed(row[0])
column_num = 0
for fieldname in RepoRequest.list_fieldnames():
if fieldname != "id":
setattr(my_repo_request, fieldname, row[column_num])
column_num += 1
w.writerow(my_repo_request.to_dict())
print u"adding repo request {}".format(my_repo_request)
db.session.merge(my_repo_request)
safe_commit(db)
def add_endpoint(my_request):
if not my_request.pmh_url:
return None
endpoint_with_this_id = Endpoint.query.filter(Endpoint.repo_request_id==my_request.id).first()
if endpoint_with_this_id:
print u"one already matches {}".format(my_request.id)
return None
raw_endpoint = my_request.pmh_url
clean_endpoint = raw_endpoint.strip()
clean_endpoint = clean_endpoint.strip("?")
    clean_endpoint = re.sub(u"\?verb=.*$", "", clean_endpoint, flags=re.IGNORECASE)
    clean_endpoint = re.sub(u"^https?://api\.unpaywall\.org/repository/endpoint/test/", "", clean_endpoint, flags=re.IGNORECASE)
print u"raw endpoint is {}, clean endpoint is {}".format(raw_endpoint, clean_endpoint)
matching_endpoint = Endpoint()
matching_endpoint.pmh_url = clean_endpoint
repo_matches = my_request.matching_repositories()
if repo_matches:
matching_repo = repo_matches[0]
print u"yay! for {} {} matches repository {}".format(
my_request.institution_name, my_request.repo_name, matching_repo)
else:
print u"no matching repository for {}: {}".format(
my_request.institution_name, my_request.repo_name)
matching_repo = Repository()
# overwrite stuff with request
matching_repo.institution_name = my_request.institution_name
matching_repo.repository_name = my_request.repo_name
matching_repo.home_page = my_request.repo_home_page
matching_endpoint.repo_unique_id = matching_repo.id
matching_endpoint.email = my_request.email
matching_endpoint.repo_request_id = my_request.id
matching_endpoint.ready_to_run = True
matching_endpoint.set_identify_and_initial_query()
db.session.merge(matching_endpoint)
db.session.merge(matching_repo)
print u"added {} {}".format(matching_endpoint, matching_repo)
print u"see at url http://unpaywall.org/sources/repository/{}".format(matching_endpoint.id)
safe_commit(db)
print "saved"
print "now sending email"
# get the endpoint again, so it gets with all the meta info etc
matching_endpoint = Endpoint.query.get(matching_endpoint.id)
matching_endpoint.contacted_text = "automated welcome email"
matching_endpoint.contacted = datetime.datetime.utcnow().isoformat()
safe_commit(db)
send_announcement_email(matching_endpoint)
print "email sent"
return matching_endpoint
def send_announcement_email(my_endpoint):
my_endpoint_id = my_endpoint.id
email_address = my_endpoint.email
repo_name = my_endpoint.repo.repository_name
institution_name = my_endpoint.repo.institution_name
print my_endpoint_id, email_address, repo_name, institution_name
# prep email
email = create_email(email_address,
"Update on your Unpaywall indexing request (ref: {} )".format(my_endpoint_id),
"repo_pulse",
{"data": {"endpoint_id": my_endpoint_id, "repo_name": repo_name, "institution_name": institution_name}},
[])
send(email, for_real=True)
if __name__ == "__main__":
rows = get_repo_request_rows()
save_repo_request_rows(rows)
my_requests = RepoRequest.query.all()
for my_request in my_requests:
if not my_request.is_duplicate:
add_endpoint(my_request)
# my_endpoints = Endpoint.query.filter(Endpoint.contacted_text=="automated welcome email")
# for my_endpoint in my_endpoints:
# print "would send an email to {}".format(my_endpoint)
# send_announcement_email(my_endpoint)
|
mit
|
robk5uj/invenio
|
modules/webjournal/lib/widgets/bfe_webjournal_widget_latestPhoto.py
|
8
|
3938
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal widget - display photos from given collections
"""
from invenio.bibformat_engine import BibFormatObject
from invenio.search_engine import perform_request_search
from invenio.config import CFG_CERN_SITE, CFG_SITE_URL, CFG_SITE_RECORD
def format_element(bfo, collections, max_photos="3", separator="<br/>"):
"""
Display the latest pictures from the given collection(s)
    @param collections: comma-separated list of collections from which photos have to be fetched
@param max_photos: maximum number of photos to display
@param separator: separator between photos
"""
try:
int_max_photos = int(max_photos)
except:
int_max_photos = 0
try:
collections_list = [coll.strip() for coll in collections.split(',')]
except:
collections_list = []
out = get_widget_html(bfo.lang, int_max_photos,
collections_list, separator, bfo.lang)
return out
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
def get_widget_html(language, max_photos, collections, separator, ln):
"""
Returns the content of the widget
"""
latest_photo_ids = perform_request_search(c=collections,
rg=max_photos,
of='id')
images_urls = []
for recid in latest_photo_ids[:max_photos]:
try:
photo_record = BibFormatObject(recid)
except:
# todo: Exception, no photo in this selection
continue
if language == "fr":
try:
title = photo_record.fields('246_1a', escape=1)[0]
            except (IndexError, KeyError):
title = ""
else:
try:
title = photo_record.fields('245__a', escape=1)[0]
            except (IndexError, KeyError):
# todo: exception, picture with no title
title = ""
if CFG_CERN_SITE and photo_record.fields('8567_'):
# Get from 8567_
dfs_images = photo_record.fields('8567_')
for image_block in dfs_images:
if image_block.get("y", '') == "Icon":
if image_block.get("u", '').startswith("http://"):
images_urls.append((recid, image_block["u"], title))
break # Just one image per record
else:
# Get from 8564_
images = photo_record.fields('8564_')
for image_block in images:
if image_block.get("x", '').lower() == "icon":
if image_block.get("q", '').startswith("http://"):
images_urls.append((recid, image_block["q"], title))
break # Just one image per record
# Build output
html_out = separator.join(['<a href="%s/%s/%i?ln=%s"><img class="phr" width="100" height="67" src="%s"/>%s</a>' % (CFG_SITE_URL, CFG_SITE_RECORD, recid, ln, photo_url, title) for (recid, photo_url, title) in images_urls])
return html_out
|
gpl-2.0
|
farazaftab/sjhschool
|
node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
|
1534
|
3426
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
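  """Seed default_variables from generator_flags and the detected OS flavor,
  pulling in the extra MSVS/Ninja settings when targeting Windows."""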
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
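  """Walk every target's dependencies and dump the resulting
  target -> dependency-list map as JSON to <output_dir>/dump.json."""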
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
|
mit
|