repo_name stringlengths 5 to 100 | path stringlengths 4 to 299 | copies stringclasses 990 values | size stringlengths 4 to 7 | content stringlengths 666 to 1.03M | license stringclasses 15 values | hash int64 -9,223,351,895,964,839,000 to 9,223,297,778B | line_mean float64 3.17 to 100 | line_max int64 7 to 1k | alpha_frac float64 0.25 to 0.98 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
mlperf/training_results_v0.7 | Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-512/object_detection/region_similarity_calculator.py | 5 | 4537 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow.compat.v1 as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
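# Worked example (added for illustration; not part of the original module).
# The helper below mirrors the arithmetic of iou() above for two single
# boxes, so the [N, M] broadcasting result can be checked by hand. It is
# defined but never called, and the box values are assumed for the example.
def _iou_single_box_sketch():
  """Return the IOU of two hand-picked boxes; the expected value is 1/7."""
  a = [0.0, 0.0, 2.0, 2.0]  # [y_min, x_min, y_max, x_max], area 4.0
  b = [1.0, 1.0, 3.0, 3.0]  # area 4.0, overlaps a in a 1x1 square
  inter_h = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))  # 1.0
  inter_w = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))  # 1.0
  inter = inter_h * inter_w                              # 1.0
  union = 4.0 + 4.0 - inter                              # areaA + areaB - inter = 7.0
  return inter / union                                   # ~0.1429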
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
    This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
| apache-2.0 | 2,971,167,608,454,835,700 | 32.607407 | 80 | 0.692528 | false |
davidoj/nervanagpu | benchmarks/gemm_test.py | 4 | 2797 | #!/usr/bin/python
# Copyright 2014 Nervana Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nervanagpu import NervanaGPU
from pycuda.autoinit import context
from time import sleep
np.set_printoptions(threshold=8193, linewidth=600, formatter={'float':lambda x: "% .1f" % x})
dtype = np.float32
ng = NervanaGPU(stochastic_round=False, bench=False)
small = (1,2,3,4,5,6,7,8,9,16,32,64,65,72,120,127,128,192)
medium = (1,64,192,778,785,786,787,794)
big = (1,64,192,1532,1535,1536,1537,1540)
for size in (small,medium,big): # small, medium, big
for m in size:
        for n in size:
for op in ("tn","nn","nt"): # "tn","nn","nt",
for k in size:
print("op,M,N,K: ", op, m, n, k)
dimA = (m,k) if op[0] == 'n' else (k,m)
dimB = (k,n) if op[1] == 'n' else (n,k)
dimC = (m,n)
cpuA = np.random.uniform(-1.0, 1.0, dimA).astype(np.float32)
cpuB = np.random.uniform(-1.0, 1.0, dimB).astype(np.float32)
#cpuB = np.identity(n, dtype=dtype)
devA = ng.array(cpuA, dtype=dtype)
devB = ng.array(cpuB, dtype=dtype)
devC = ng.empty(dimC, dtype=dtype)
#repeat = min(int(50.0 * 4096**3 / (m * n * k)), 1000)
if op[0] == 't': cpuA, devA = cpuA.T, devA.T
if op[1] == 't': cpuB, devB = cpuB.T, devB.T
ng.dot(devA, devB, devC, repeat=1)
#context.synchronize()
cpuC = np.dot(cpuA, cpuB)
cpuD = devC.get()
diff = np.absolute(cpuC - cpuD)
max_diff = diff.max()
print(max_diff, cpuD.max())
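                    # Added note: max_diff != max_diff is True only when max_diff is NaN.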
if max_diff > 0.1 or max_diff != max_diff:
#print(m, n, k, max_diff)
print(cpuD[::max(m//16,1),::max(n//16,1)])
print(cpuC[::max(m//16,1),::max(n//16,1)])
print(diff[::max(m//16,1),::max(n//16,1)])
exit()
# print(max_diff, diff.min(), np.sum(cpuC) - np.sum(cpuD))
| apache-2.0 | -4,568,549,063,002,603,500 | 35.324675 | 93 | 0.52306 | false |
SivagnanamCiena/asa-capture | asa-capture.py | 2 | 2741 | # 2014-07-10/DN - attempting to implement argparse so that user, pwd & IP can be
# passed in from CLI.
# 2014-07-10/DN - Works with my ASA but I had to add an enable option as the enable pwd
# is different. Might be nice to default the enable password to the user password if
# that was supplied.
import pexpect #module for logging into the ASA
import sys #module for writing files to log/linux shell
import argparse #parsing command line arguments
# 2014-07-10/DN - debugging to clear the screen with each run
#import os #operating system options
#os.system('cls' if os.name == 'nt' else 'clear')
parser = argparse.ArgumentParser(description='Get "show version" from a Cisco ASA.')
parser.add_argument('-u', '--user', default='cisco', help='user name to login with (default=cisco)')
parser.add_argument('-p', '--password', default='cisco', help='password to login with (default=cisco)')
parser.add_argument('-e', '--enable', default='cisco', help='password for enable (default=cisco)')
parser.add_argument('-d', '--device', default='192.168.120.160', help='device to login to (default=192.168.120.160)')
args = parser.parse_args()
#child becomes the object to send/receive commands from the ASA
child = pexpect.spawn('ssh '+args.user+'@'+args.device)
#for debugging we send the input and output to the linux shell
child.logfile_read = sys.stdout
child.logfile_send = sys.stdout
#familiar process of logging into a cisco device
#expect waits for response from the console
#some special characters here like:
# . means any character
# + means the previous character 1 or more times
# * means the previous character 0 or more times
#the print commands are here in case you run into trouble and will give you an idea where the script stopped
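# Illustrative note (added, not in the original script): the expect() patterns
# below are regular expressions, so '.*#.*' matches any output containing '#',
# such as an enable-mode prompt like 'ciscoasa# ', while '.*> ' matches an
# unprivileged 'ciscoasa> ' prompt. The hostname shown here is assumed purely
# for the example; a quick check with Python's re module behaves the same way:
#   import re
#   re.search('.*#.*', 'ciscoasa# ')  # matches the enable-mode prompt
#   re.search('.*> ', 'ciscoasa> ')   # matches the unprivileged prompt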
print 'expecting password'
child.expect('.*password: ')
print 'sending password'
child.sendline(args.password)
print 'expecting login'
#expecting the hostname> prompt
child.expect('.*> ')
child.sendline('enable')
#expecting the enable password prompt
child.expect('Password.*')
print 'sending password'
child.sendline(args.enable)
print 'expecting exec'
#expecting a login prompt of hostname#
child.expect('.*#.*')
#setting the terminal length to infinity so we don't need to press space or enter to continue the prompt
child.sendline('terminal pager 0')
#setting a new file for output so we can write output from the screen to a file for later
fout = file(args.device+'.log','w')
child.expect('.*#.*')
#setting the show version output to a file
child.logfile_read = fout
child.sendline('show version')
#expecting the hostname# prompt
child.expect('.*#.*')
fout.close() #closing the file for best practice
child.sendline('exit') # logout of the ASA
exit()
| mit | -7,041,156,504,277,109,000 | 36.547945 | 119 | 0.736228 | false |
PythonCharmers/FunkLoad | src/funkload/ReportRenderHtmlGnuPlot.py | 3 | 25471 | # (C) Copyright 2009 Nuxeo SAS <http://nuxeo.com>
# Author: [email protected]
# Contributors: Kelvin Ward
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
"""Render chart using gnuplot >= 4.2
$Id$
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import re
from commands import getstatusoutput
from .apdex import Apdex
from .ReportRenderRst import rst_title
from .ReportRenderHtmlBase import RenderHtmlBase
from datetime import datetime
from .MonitorPlugins import MonitorPlugins
from .MonitorPluginsDefault import MonitorCPU, MonitorMemFree, MonitorNetwork, MonitorCUs
def gnuplot(script_path):
"""Execute a gnuplot script."""
path = os.path.dirname(os.path.abspath(script_path))
if sys.platform.lower().startswith('win'):
# commands module doesn't work on win and gnuplot is named
# wgnuplot
ret = os.system('cd "' + path + '" && wgnuplot "' +
os.path.abspath(script_path) + '"')
if ret != 0:
raise RuntimeError("Failed to run wgnuplot cmd on " +
os.path.abspath(script_path))
else:
cmd = 'cd "' + path + '"; gnuplot "' + os.path.abspath(script_path) + '"'
ret, output = getstatusoutput(cmd)
if ret != 0:
raise RuntimeError("Failed to run gnuplot cmd: " + cmd +
"\n" + str(output))
def gnuplot_scriptpath(base, filename):
"""Return a file path string from the join of base and file name for use
inside a gnuplot script.
Backslashes (the win os separator) are replaced with forward
slashes. This is done because gnuplot scripts interpret backslashes
specially even in path elements.
"""
return os.path.join(base, filename).replace("\\", "/")
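# Usage sketch (added note): on Windows, gnuplot_scriptpath("c:\\rep", "t.png")
# returns "c:/rep/t.png", whereas a plain os.path.join would yield
# "c:\\rep\\t.png", whose backslashes gnuplot would try to read as escape
# sequences inside the generated script.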
class FakeMonitorConfig:
def __init__(self, name):
self.name = name
class RenderHtmlGnuPlot(RenderHtmlBase):
"""Render stats in html using gnuplot
Simply render stuff in ReST then ask docutils to build an html doc.
"""
chart_size = (640, 540)
#big_chart_size = (640, 480)
ticpattern = re.compile('(\:\d+)\ ')
def getChartSizeTmp(self, cvus):
"""Override for gnuplot format"""
return str(self.chart_size[0]) + ',' + str(self.chart_size[1])
def getXRange(self):
"""Return the max CVUs range."""
maxCycle = self.config['cycles'].split(',')[-1]
maxCycle = str(maxCycle[:-1].strip())
if maxCycle.startswith("["):
maxCycle = maxCycle[1:]
return "[0:" + str(int(maxCycle) + 1) + "]"
def useXTicLabels(self):
"""Guess if we need to use labels for x axis or number."""
cycles = self.config['cycles'][1:-1].split(',')
if len(cycles) <= 1:
# single cycle
return True
if len(cycles) != len(set(cycles)):
# duplicates cycles
return True
cycles = [int(i) for i in cycles]
for i, v in enumerate(cycles[1:]):
# unordered cycles
if cycles[i] > v:
return True
return False
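    # Examples (added note): with cycles "[10,20,30]" the x axis stays numeric;
    # a single cycle "[10]", duplicates "[10,10,20]", or an unordered list
    # "[30,10,20]" each make useXTicLabels() return True, switching the
    # generated gnuplot scripts over to xticlabels.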
def fixXLabels(self, lines):
"""Fix gnuplot script if CUs are not ordered."""
if not self.useXTicLabels():
return lines
# remove xrange line
out = lines.replace('set xrange', '#set xrange')
# rewrite plot using xticlabels
out = out.replace(' 1:', ' :')
out = self.ticpattern.sub(r'\1:xticlabels(1) ', out)
return out
def createTestChart(self):
"""Create the test chart."""
image_path = gnuplot_scriptpath(self.report_dir, 'tests.png')
gplot_path = str(os.path.join(self.report_dir, 'tests.gplot'))
data_path = gnuplot_scriptpath(self.report_dir, 'tests.data')
stats = self.stats
# data
lines = ["CUs STPS ERROR"]
cvus = []
has_error = False
for cycle in self.cycles:
if 'test' not in stats[cycle]:
continue
values = []
test = stats[cycle]['test']
values.append(str(test.cvus))
cvus.append(str(test.cvus))
values.append(str(test.tps))
error = test.error_percent
if error:
has_error = True
values.append(str(error))
lines.append(' '.join(values))
if len(lines) == 1:
# No tests finished during the cycle
return
f = open(data_path, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
# script
lines = ['set output "' + image_path +'"']
lines.append('set title "Successful Tests Per Second"')
lines.append('set terminal png size ' + self.getChartSizeTmp(cvus))
lines.append('set xlabel "Concurrent Users"')
lines.append('set ylabel "Test/s"')
lines.append('set grid back')
lines.append('set xrange ' + self.getXRange())
if not has_error:
lines.append('plot "%s" u 1:2 w linespoints lw 2 lt 2 t "STPS"' % data_path)
else:
lines.append('set format x ""')
lines.append('set multiplot')
lines.append('unset title')
lines.append('unset xlabel')
lines.append('set size 1, 0.7')
lines.append('set origin 0, 0.3')
lines.append('set lmargin 5')
lines.append('set bmargin 0')
lines.append('plot "%s" u 1:2 w linespoints lw 2 lt 2 t "STPS"' % data_path)
lines.append('set format x "% g"')
lines.append('set bmargin 3')
lines.append('set autoscale y')
lines.append('set style fill solid .25')
lines.append('set size 1.0, 0.3')
lines.append('set ytics 20')
lines.append('set xlabel "Concurrent Users"')
lines.append('set ylabel "% errors"')
lines.append('set origin 0.0, 0.0')
lines.append('set yrange [0:100]')
lines.append('plot "%s" u 1:3 w linespoints lt 1 lw 2 t "%% Errors"' % data_path)
lines.append('unset multiplot')
f = open(gplot_path, 'w')
lines = self.fixXLabels('\n'.join(lines) + '\n')
f.write(lines)
f.close()
gnuplot(gplot_path)
return
def appendDelays(self, delay, delay_low, delay_high, stats):
""" Show percentiles or min, avg and max in chart. """
if self.options.with_percentiles:
delay.append(stats.percentiles.perc50)
delay_low.append(stats.percentiles.perc10)
delay_high.append(stats.percentiles.perc90)
else:
delay.append(stats.avg)
delay_low.append(stats.min)
delay_high.append(stats.max)
def createPageChart(self):
"""Create the page chart."""
image_path = gnuplot_scriptpath(self.report_dir, 'pages_spps.png')
image2_path = gnuplot_scriptpath(self.report_dir, 'pages.png')
gplot_path = str(os.path.join(self.report_dir, 'pages.gplot'))
data_path = gnuplot_scriptpath(self.report_dir, 'pages.data')
stats = self.stats
# data
lines = ["CUs SPPS ERROR MIN AVG MAX P10 P50 P90 P95 APDEX E G F P U"]
cvus = []
has_error = False
for cycle in self.cycles:
if 'page' not in stats[cycle]:
continue
values = []
page = stats[cycle]['page']
values.append(str(page.cvus))
cvus.append(str(page.cvus))
values.append(str(page.rps))
error = page.error_percent
if error:
has_error = True
values.append(str(error))
values.append(str(page.min))
values.append(str(page.avg))
values.append(str(page.max))
values.append(str(page.percentiles.perc10))
values.append(str(page.percentiles.perc50))
values.append(str(page.percentiles.perc90))
values.append(str(page.percentiles.perc95))
score = page.apdex_score
values.append(str(score))
apdex = ['0', '0', '0', '0', '0']
score_cls = Apdex.get_score_class(score)
score_classes = Apdex.score_classes[:] #copy
#flip from worst-to-best to best-to-worst
score_classes.reverse()
index = score_classes.index(score_cls)
apdex[index] = str(score)
values = values + apdex
lines.append(' '.join(values))
if len(lines) == 1:
# No pages finished during a cycle
return
f = open(data_path, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
# script
lines = ['set output "' + image_path +'"']
lines.append('set title "Successful Pages Per Second"')
lines.append('set ylabel "Pages Per Second"')
lines.append('set grid back')
lines.append('set xrange ' + self.getXRange())
lines.append('set terminal png size ' + self.getChartSizeTmp(cvus))
lines.append('set format x ""')
lines.append('set multiplot')
lines.append('unset title')
lines.append('unset xlabel')
lines.append('set bmargin 0')
lines.append('set lmargin 8')
lines.append('set rmargin 9.5')
lines.append('set key inside top')
if has_error:
lines.append('set size 1, 0.4')
lines.append('set origin 0, 0.6')
else:
lines.append('set size 1, 0.6')
lines.append('set origin 0, 0.4')
lines.append('plot "%s" u 1:2 w linespoints lw 2 lt 2 t "SPPS"' % data_path)
# apdex
lines.append('set boxwidth 0.8')
lines.append('set style fill solid .7')
lines.append('set ylabel "Apdex %.1f" ' % Apdex.T)
lines.append('set yrange [0:1]')
lines.append('set key outside top')
if has_error:
lines.append('set origin 0.0, 0.3')
lines.append('set size 1.0, 0.3')
else:
lines.append('set size 1.0, 0.4')
lines.append('set bmargin 3')
lines.append('set format x "% g"')
lines.append('set xlabel "Concurrent Users"')
lines.append('set origin 0.0, 0.0')
lines.append('plot "%s" u 1:12 w boxes lw 2 lt rgb "#99CDFF" t "E", "" u 1:13 w boxes lw 2 lt rgb "#00FF01" t "G", "" u 1:14 w boxes lw 2 lt rgb "#FFFF00" t "F", "" u 1:15 w boxes lw 2 lt rgb "#FF7C81" t "P", "" u 1:16 w boxes lw 2 lt rgb "#C0C0C0" t "U"' % data_path)
lines.append('unset boxwidth')
lines.append('set key inside top')
if has_error:
lines.append('set bmargin 3')
lines.append('set format x "% g"')
lines.append('set xlabel "Concurrent Users"')
lines.append('set origin 0.0, 0.0')
lines.append('set size 1.0, 0.3')
lines.append('set ylabel "% errors"')
lines.append('set yrange [0:100]')
lines.append('plot "%s" u 1:3 w boxes lt 1 lw 2 t "%% Errors"' % data_path)
lines.append('unset yrange')
lines.append('set autoscale y')
lines.append('unset multiplot')
lines.append('set size 1.0, 1.0')
lines.append('unset rmargin')
lines.append('set output "%s"' % image2_path)
lines.append('set title "Pages Response time"')
lines.append('set ylabel "Duration (s)"')
lines.append('set bars 5.0')
lines.append('set style fill solid .25')
lines.append('plot "%s" u 1:8:8:10:9 t "med/p90/p95" w candlesticks lt 1 lw 1 whiskerbars 0.5, "" u 1:7:4:8:8 w candlesticks lt 2 lw 1 t "min/p10/med" whiskerbars 0.5, "" u 1:5 t "avg" w lines lt 3 lw 2' % data_path)
f = open(gplot_path, 'w')
lines = self.fixXLabels('\n'.join(lines) + '\n')
f.write(lines)
f.close()
gnuplot(gplot_path)
def createRPSTimeChart(self):
"""Create a RPS chart where X-axis represent the time in seconds."""
img_path = gnuplot_scriptpath(self.report_dir, 'time_rps.png')
plot_path = gnuplot_scriptpath(self.report_dir, 'time_rps.gplot')
stats = self.stats
start_timeline = sys.maxsize
end_timeline = -1
max_rps = 0
min_rps = 0
for cycle in self.cycles:
dpath = gnuplot_scriptpath(self.report_dir,
'time_rps-{0}.data'.format(cycle))
f = open(dpath, 'w')
f.write('Timeline RPS\n')
try:
st = stats[cycle]['response']
for k in sorted(st.per_second.iterkeys()):
if k < start_timeline:
start_timeline = k
if k > end_timeline:
end_timeline = k
if st.per_second[k] > max_rps:
max_rps = st.per_second[k]
f.write('{0} {1}\n'.format(k, st.per_second[k]))
except Exception as e:
print("Exception: {0}".format(e))
finally:
                f.close()
#print "max rps: {0}".format(max_rps)
#print "time range: {0}-{1}".format(start_timeline, end_timeline)
max_rps = int(max_rps * 1.25)
f = open(plot_path, "w")
lines = []
lines.append('set output "{0}"'.format(img_path))
lines.append('set title "Request Per Second over time"')
lines.append('set xlabel "Time line"')
lines.append('set xdata time')
lines.append('set timefmt "%s"')
lines.append('set format x "%H:%M"')
lines.append('set ylabel "RPS"')
lines.append('set grid')
#lines.append('set xrange [{0}:{1}]'.format(0, end_timeline - start_timeline))
lines.append('set yrange [{0}:{1}]'.format(min_rps, max_rps))
        # I don't know why self.getChartSizeTmp() accepts cvus, which is not used currently.
cvus = []
lines.append('set terminal png size ' + self.getChartSizeTmp(cvus))
plot_line = 'plot '
colors = [
# This RGB value used for the line color for each cycle.
# TODO: use more pretty color?
"000000",
"0000FF",
"00FA9A",
"191970",
"8B008B",
"FF00FF",
"FFD700",
"0000CD",
"00BFFF",
"00FF00",
"7FFF00",
"FF0000",
"FF8C00",
        ]
for i, cycle in enumerate(self.cycles):
if i != 0:
plot_line += ', \\\n'
dpath = gnuplot_scriptpath(self.report_dir,
'time_rps-{0}.data'.format(cycle))
#lines.append('set size 1,1\n')
#lines.append('set origin 0,0\n')
#plot_line += '"' + dpath + '" u ($1 - {0}):($2)'.format(start_timeline)
plot_line += '"' + dpath + '" u ($1):($2)'
plot_line += ' w linespoints smooth sbezier lw 1 lt 2 lc ' + \
'rgbcolor "#696969" notitle'
plot_line += ', \\\n'
#plot_line += '"' + dpath + '" u ($1 - {0}):($2)'.format(start_timeline)
plot_line += '"' + dpath + '" u ($1):($2)'
plot_line += ' w linespoints lw 1 lt 2 lc ' + \
'rgbcolor "#{0}" t "{1} CUs"'.format(colors[i % len(colors)],
stats[cycle]['response'].cvus)
lines.append(plot_line)
#lines.append('unset multiplot\n')
lines = self.fixXLabels('\n'.join(lines) + '\n')
f.write(lines)
f.close()
gnuplot(plot_path)
return
def createAllResponseChart(self):
"""Create global responses chart."""
self.createRPSTimeChart()
image_path = gnuplot_scriptpath(self.report_dir, 'requests_rps.png')
image2_path = gnuplot_scriptpath(self.report_dir, 'requests.png')
gplot_path = str(os.path.join(self.report_dir, 'requests.gplot'))
data_path = gnuplot_scriptpath(self.report_dir, 'requests.data')
stats = self.stats
# data
lines = ["CUs RPS ERROR MIN AVG MAX P10 P50 P90 P95 APDEX"]
cvus = []
has_error = False
for cycle in self.cycles:
if 'response' not in stats[cycle]:
continue
values = []
resp = stats[cycle]['response']
values.append(str(resp.cvus))
cvus.append(str(resp.cvus))
values.append(str(resp.rps))
error = resp.error_percent
if error:
has_error = True
values.append(str(error))
values.append(str(resp.min))
values.append(str(resp.avg))
values.append(str(resp.max))
values.append(str(resp.percentiles.perc10))
values.append(str(resp.percentiles.perc50))
values.append(str(resp.percentiles.perc90))
values.append(str(resp.percentiles.perc95))
values.append(str(resp.apdex_score))
lines.append(' '.join(values))
if len(lines) == 1:
# No result during a cycle
return
f = open(data_path, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
# script
lines = ['set output "' + image_path +'"']
lines.append('set title "Requests Per Second"')
lines.append('set xlabel "Concurrent Users"')
lines.append('set ylabel "Requests Per Second"')
lines.append('set grid')
lines.append('set xrange ' + self.getXRange())
lines.append('set terminal png size ' + self.getChartSizeTmp(cvus))
if not has_error:
lines.append('plot "%s" u 1:2 w linespoints lw 2 lt 2 t "RPS"' % data_path)
else:
lines.append('set format x ""')
lines.append('set multiplot')
lines.append('unset title')
lines.append('unset xlabel')
lines.append('set size 1, 0.7')
lines.append('set origin 0, 0.3')
lines.append('set lmargin 5')
lines.append('set bmargin 0')
lines.append('plot "%s" u 1:2 w linespoints lw 2 lt 2 t "RPS"' % data_path)
lines.append('set format x "% g"')
lines.append('set bmargin 3')
lines.append('set autoscale y')
lines.append('set style fill solid .25')
lines.append('set size 1.0, 0.3')
lines.append('set xlabel "Concurrent Users"')
lines.append('set ylabel "% errors"')
lines.append('set origin 0.0, 0.0')
#lines.append('set yrange [0:100]')
#lines.append('set ytics 20')
lines.append('plot "%s" u 1:3 w linespoints lt 1 lw 2 t "%% Errors"' % data_path)
lines.append('unset multiplot')
lines.append('set size 1.0, 1.0')
lines.append('set output "%s"' % image2_path)
lines.append('set title "Requests Response time"')
lines.append('set ylabel "Duration (s)"')
lines.append('set bars 5.0')
lines.append('set grid back')
lines.append('set style fill solid .25')
lines.append('plot "%s" u 1:8:8:10:9 t "med/p90/p95" w candlesticks lt 1 lw 1 whiskerbars 0.5, "" u 1:7:4:8:8 w candlesticks lt 2 lw 1 t "min/p10/med" whiskerbars 0.5, "" u 1:5 t "avg" w lines lt 3 lw 2' % data_path)
f = open(gplot_path, 'w')
lines = self.fixXLabels('\n'.join(lines) + '\n')
f.write(lines)
f.close()
gnuplot(gplot_path)
return
def createResponseChart(self, step):
"""Create responses chart."""
image_path = gnuplot_scriptpath(self.report_dir,
'request_%s.png' % step)
gplot_path = str(os.path.join(self.report_dir,
'request_%s.gplot' % step))
data_path = gnuplot_scriptpath(self.report_dir,
'request_%s.data' % step)
stats = self.stats
# data
lines = ["CUs STEP ERROR MIN AVG MAX P10 P50 P90 P95 APDEX"]
cvus = []
has_error = False
for cycle in self.cycles:
if step not in stats[cycle]['response_step']:
continue
values = []
resp = stats[cycle]['response_step'].get(step)
values.append(str(resp.cvus))
cvus.append(str(resp.cvus))
values.append(str(step))
error = resp.error_percent
if error:
has_error = True
values.append(str(error))
values.append(str(resp.min))
values.append(str(resp.avg))
values.append(str(resp.max))
values.append(str(resp.percentiles.perc10))
values.append(str(resp.percentiles.perc50))
values.append(str(resp.percentiles.perc90))
values.append(str(resp.percentiles.perc95))
values.append(str(resp.apdex_score))
lines.append(' '.join(values))
if len(lines) == 1:
# No result during a cycle
return
f = open(data_path, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
# script
lines = []
lines.append('set output "%s"' % image_path)
lines.append('set terminal png size ' + self.getChartSizeTmp(cvus))
lines.append('set grid')
lines.append('set bars 5.0')
lines.append('set title "Request %s Response time"' % step)
lines.append('set xlabel "Concurrent Users"')
lines.append('set ylabel "Duration (s)"')
lines.append('set grid back')
lines.append('set style fill solid .25')
lines.append('set xrange ' + self.getXRange())
if not has_error:
lines.append('plot "%s" u 1:8:8:10:9 t "med/p90/p95" w candlesticks lt 1 lw 1 whiskerbars 0.5, "" u 1:7:4:8:8 w candlesticks lt 2 lw 1 t "min/p10/med" whiskerbars 0.5, "" u 1:5 t "avg" w lines lt 3 lw 2' % data_path)
else:
lines.append('set format x ""')
lines.append('set multiplot')
lines.append('unset title')
lines.append('unset xlabel')
lines.append('set size 1, 0.7')
lines.append('set origin 0, 0.3')
lines.append('set lmargin 5')
lines.append('set bmargin 0')
lines.append('plot "%s" u 1:8:8:10:9 t "med/p90/p95" w candlesticks lt 1 lw 1 whiskerbars 0.5, "" u 1:7:4:8:8 w candlesticks lt 2 lw 1 t "min/p10/med" whiskerbars 0.5, "" u 1:5 t "avg" w lines lt 3 lw 2' % data_path)
lines.append('set format x "% g"')
lines.append('set bmargin 3')
lines.append('set autoscale y')
lines.append('set style fill solid .25')
lines.append('set size 1.0, 0.3')
lines.append('set xlabel "Concurrent Users"')
lines.append('set ylabel "% errors"')
lines.append('set origin 0.0, 0.0')
#lines.append('set yrange [0:100]')
#lines.append('set ytics 20')
lines.append('plot "%s" u 1:3 w linespoints lt 1 lw 2 t "%% Errors"' % data_path)
lines.append('unset multiplot')
lines.append('set size 1.0, 1.0')
f = open(gplot_path, 'w')
lines = self.fixXLabels('\n'.join(lines) + '\n')
f.write(lines)
f.close()
gnuplot(gplot_path)
return
def createMonitorChart(self, host):
"""Create monitrored server charts."""
stats = self.monitor[host]
times = []
cvus_list = []
for stat in stats:
test, cycle, cvus = stat.key.split(':')
stat.cvus=cvus
date = datetime.fromtimestamp(float(stat.time))
times.append(date.strftime("%H:%M:%S"))
#times.append(int(float(stat.time))) # - time_start))
cvus_list.append(cvus)
Plugins = MonitorPlugins()
Plugins.registerPlugins()
Plugins.configure(self.getMonitorConfig(host))
charts=[]
for plugin in Plugins.MONITORS.values():
image_prefix = gnuplot_scriptpath(self.report_dir, '%s_%s' % (host, plugin.name))
data_prefix = gnuplot_scriptpath(self.report_dir, '%s_%s' % (host, plugin.name))
gplot_path = str(os.path.join(self.report_dir, '%s_%s.gplot' % (host, plugin.name)))
r=plugin.gnuplot(times, host, image_prefix, data_prefix, gplot_path, self.chart_size, stats)
            if r is not None:
gnuplot(gplot_path)
charts.extend(r)
return charts
| gpl-2.0 | 2,208,883,042,714,381,300 | 40.483713 | 276 | 0.54509 | false |
jayhetee/coveragepy | tests/test_process.py | 2 | 37293 | # coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for process behavior of coverage.py."""
import glob
import os
import os.path
import re
import sys
import textwrap
import coverage
from coverage import env, CoverageData
from tests.coveragetest import CoverageTest
HERE = os.path.dirname(__file__)
class ProcessTest(CoverageTest):
"""Tests of the per-process behavior of coverage.py."""
def number_of_data_files(self):
"""Return the number of coverage data files in this directory."""
num = 0
for f in os.listdir('.'):
if f.startswith('.coverage.') or f == '.coverage':
num += 1
return num
def test_save_on_exit(self):
self.make_file("mycode.py", """\
h = "Hello"
w = "world"
""")
self.assert_doesnt_exist(".coverage")
self.run_command("coverage run mycode.py")
self.assert_exists(".coverage")
def test_environment(self):
# Checks that we can import modules from the test directory at all!
self.make_file("mycode.py", """\
import covmod1
import covmodzip1
a = 1
print('done')
""")
self.assert_doesnt_exist(".coverage")
out = self.run_command("coverage run mycode.py")
self.assert_exists(".coverage")
self.assertEqual(out, 'done\n')
def make_b_or_c_py(self):
"""Create b_or_c.py, used in a few of these tests."""
self.make_file("b_or_c.py", """\
import sys
a = 1
if sys.argv[1] == 'b':
b = 1
else:
c = 1
d = 1
print('done')
""")
def test_combine_parallel_data(self):
self.make_b_or_c_py()
out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
# After two -p runs, there should be two .coverage.machine.123 files.
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and see that b_or_c.py has all 7 lines
# executed.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.line_counts()['b_or_c.py'], 7)
def test_combine_parallel_data_in_two_steps(self):
self.make_b_or_c_py()
out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
# Combine the (one) parallel coverage data file into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and see that b_or_c.py has all 7 lines
# executed.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.line_counts()['b_or_c.py'], 7)
def test_append_data(self):
self.make_b_or_c_py()
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
out = self.run_command("coverage run --append b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and see that b_or_c.py has all 7 lines
# executed.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.line_counts()['b_or_c.py'], 7)
def test_append_data_with_different_file(self):
self.make_b_or_c_py()
self.make_file(".coveragerc", """\
[run]
data_file = .mycovdata
""")
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_exists(".mycovdata")
out = self.run_command("coverage run --append b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_exists(".mycovdata")
# Read the coverage file and see that b_or_c.py has all 7 lines
# executed.
data = coverage.CoverageData()
data.read_file(".mycovdata")
self.assertEqual(data.line_counts()['b_or_c.py'], 7)
def test_append_can_create_a_data_file(self):
self.make_b_or_c_py()
out = self.run_command("coverage run --append b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and see that b_or_c.py has only 6 lines
# executed.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.line_counts()['b_or_c.py'], 6)
def test_combine_with_rc(self):
self.make_b_or_c_py()
self.make_file(".coveragerc", """\
[run]
parallel = true
""")
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
out = self.run_command("coverage run b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
# After two runs, there should be two .coverage.machine.123 files.
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assert_exists(".coveragerc")
# After combining, there should be only the .coverage file.
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and see that b_or_c.py has all 7 lines
# executed.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.line_counts()['b_or_c.py'], 7)
# Reporting should still work even with the .rc file
out = self.run_command("coverage report")
self.assertMultiLineEqual(out, textwrap.dedent("""\
Name Stmts Miss Cover
-------------------------------
b_or_c.py 7 0 100%
"""))
def test_combine_with_aliases(self):
self.make_file("d1/x.py", """\
a = 1
b = 2
print("%s %s" % (a, b))
""")
self.make_file("d2/x.py", """\
# 1
# 2
# 3
c = 4
d = 5
print("%s %s" % (c, d))
""")
self.make_file(".coveragerc", """\
[run]
parallel = True
[paths]
source =
src
*/d1
*/d2
""")
out = self.run_command("coverage run " + os.path.normpath("d1/x.py"))
self.assertEqual(out, '1 2\n')
out = self.run_command("coverage run " + os.path.normpath("d2/x.py"))
self.assertEqual(out, '4 5\n')
self.assertEqual(self.number_of_data_files(), 2)
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage data file and see that the two different x.py
# files have been combined together.
data = coverage.CoverageData()
data.read_file(".coverage")
summary = data.line_counts(fullpath=True)
self.assertEqual(len(summary), 1)
actual = os.path.normcase(os.path.abspath(list(summary.keys())[0]))
expected = os.path.normcase(os.path.abspath('src/x.py'))
self.assertEqual(actual, expected)
self.assertEqual(list(summary.values())[0], 6)
def test_erase_parallel(self):
self.make_file(".coveragerc", """\
[run]
data_file = data.dat
parallel = True
""")
self.make_file("data.dat")
self.make_file("data.dat.fooey")
self.make_file("data.dat.gooey")
self.make_file(".coverage")
self.run_command("coverage erase")
self.assert_doesnt_exist("data.dat")
self.assert_doesnt_exist("data.dat.fooey")
self.assert_doesnt_exist("data.dat.gooey")
self.assert_exists(".coverage")
def test_missing_source_file(self):
# Check what happens if the source is missing when reporting happens.
self.make_file("fleeting.py", """\
s = 'goodbye, cruel world!'
""")
self.run_command("coverage run fleeting.py")
os.remove("fleeting.py")
out = self.run_command("coverage html -d htmlcov")
self.assertRegex(out, "No source for code: '.*fleeting.py'")
self.assertNotIn("Traceback", out)
# It happens that the code paths are different for *.py and other
# files, so try again with no extension.
self.make_file("fleeting", """\
s = 'goodbye, cruel world!'
""")
self.run_command("coverage run fleeting")
os.remove("fleeting")
status, out = self.run_command_status("coverage html -d htmlcov")
self.assertRegex(out, "No source for code: '.*fleeting'")
self.assertNotIn("Traceback", out)
self.assertEqual(status, 1)
def test_running_missing_file(self):
status, out = self.run_command_status("coverage run xyzzy.py")
self.assertRegex(out, "No file to run: .*xyzzy.py")
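        # Added note: "raceback" and "rror" match both upper- and lower-case variants.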
self.assertNotIn("raceback", out)
self.assertNotIn("rror", out)
self.assertEqual(status, 1)
def test_code_throws(self):
self.make_file("throw.py", """\
def f1():
raise Exception("hey!")
def f2():
f1()
f2()
""")
# The important thing is for "coverage run" and "python" to report the
# same traceback.
status, out = self.run_command_status("coverage run throw.py")
out2 = self.run_command("python throw.py")
if env.PYPY:
# Pypy has an extra frame in the traceback for some reason
lines2 = out2.splitlines()
out2 = "".join(l+"\n" for l in lines2 if "toplevel" not in l)
self.assertMultiLineEqual(out, out2)
# But also make sure that the output is what we expect.
self.assertIn('File "throw.py", line 5, in f2', out)
self.assertIn('raise Exception("hey!")', out)
self.assertNotIn('coverage', out)
self.assertEqual(status, 1)
def test_code_exits(self):
self.make_file("exit.py", """\
import sys
def f1():
print("about to exit..")
sys.exit(17)
def f2():
f1()
f2()
""")
# The important thing is for "coverage run" and "python" to have the
# same output. No traceback.
status, out = self.run_command_status("coverage run exit.py")
status2, out2 = self.run_command_status("python exit.py")
self.assertMultiLineEqual(out, out2)
self.assertMultiLineEqual(out, "about to exit..\n")
self.assertEqual(status, status2)
self.assertEqual(status, 17)
def test_code_exits_no_arg(self):
self.make_file("exit_none.py", """\
import sys
def f1():
print("about to exit quietly..")
sys.exit()
f1()
""")
status, out = self.run_command_status("coverage run exit_none.py")
status2, out2 = self.run_command_status("python exit_none.py")
self.assertMultiLineEqual(out, out2)
self.assertMultiLineEqual(out, "about to exit quietly..\n")
self.assertEqual(status, status2)
self.assertEqual(status, 0)
def test_coverage_run_is_like_python(self):
tryfile = os.path.join(HERE, "try_execfile.py")
with open(tryfile) as f:
self.make_file("run_me.py", f.read())
out_cov = self.run_command("coverage run run_me.py")
out_py = self.run_command("python run_me.py")
self.assertMultiLineEqual(out_cov, out_py)
def test_coverage_run_dashm_is_like_python_dashm(self):
# These -m commands assume the coverage tree is on the path.
out_cov = self.run_command("coverage run -m tests.try_execfile")
out_py = self.run_command("python -m tests.try_execfile")
self.assertMultiLineEqual(out_cov, out_py)
def test_coverage_run_dir_is_like_python_dir(self):
tryfile = os.path.join(HERE, "try_execfile.py")
with open(tryfile) as f:
self.make_file("with_main/__main__.py", f.read())
out_cov = self.run_command("coverage run with_main")
out_py = self.run_command("python with_main")
# The coverage.py results are not identical to the Python results, and
# I don't know why. For now, ignore those failures. If someone finds
# a real problem with the discrepancies, we can work on it some more.
ignored = r"__file__|__loader__|__package__"
# PyPy includes the current directory in the path when running a
# directory, while CPython and coverage.py do not. Exclude that from
# the comparison also...
if env.PYPY:
ignored += "|"+re.escape(os.getcwd())
out_cov = remove_matching_lines(out_cov, ignored)
out_py = remove_matching_lines(out_py, ignored)
self.assertMultiLineEqual(out_cov, out_py)
def test_coverage_run_dashm_equal_to_doubledashsource(self):
"""regression test for #328
When imported by -m, a module's __name__ is __main__, but we need the
--source machinery to know and respect the original name.
"""
# These -m commands assume the coverage tree is on the path.
out_cov = self.run_command(
"coverage run --source tests.try_execfile -m tests.try_execfile"
)
out_py = self.run_command("python -m tests.try_execfile")
self.assertMultiLineEqual(out_cov, out_py)
def test_coverage_run_dashm_superset_of_doubledashsource(self):
"""Edge case: --source foo -m foo.bar"""
# These -m commands assume the coverage tree is on the path.
out_cov = self.run_command(
"coverage run --source tests -m tests.try_execfile"
)
out_py = self.run_command("python -m tests.try_execfile")
self.assertMultiLineEqual(out_cov, out_py)
st, out = self.run_command_status("coverage report")
self.assertEqual(st, 0)
self.assertEqual(self.line_count(out), 6, out)
def test_coverage_run_script_imports_doubledashsource(self):
# This file imports try_execfile, which compiles it to .pyc, so the
# first run will have __file__ == "try_execfile.py" and the second will
# have __file__ == "try_execfile.pyc", which throws off the comparison.
# Setting dont_write_bytecode True stops the compilation to .pyc and
# keeps the test working.
self.make_file("myscript", """\
import sys; sys.dont_write_bytecode = True
import tests.try_execfile
""")
# These -m commands assume the coverage tree is on the path.
out_cov = self.run_command(
"coverage run --source tests myscript"
)
out_py = self.run_command("python myscript")
self.assertMultiLineEqual(out_cov, out_py)
st, out = self.run_command_status("coverage report")
self.assertEqual(st, 0)
self.assertEqual(self.line_count(out), 6, out)
def test_coverage_run_dashm_is_like_python_dashm_off_path(self):
# https://bitbucket.org/ned/coveragepy/issue/242
tryfile = os.path.join(HERE, "try_execfile.py")
self.make_file("sub/__init__.py", "")
with open(tryfile) as f:
self.make_file("sub/run_me.py", f.read())
out_cov = self.run_command("coverage run -m sub.run_me")
out_py = self.run_command("python -m sub.run_me")
self.assertMultiLineEqual(out_cov, out_py)
def test_coverage_run_dashm_is_like_python_dashm_with__main__207(self):
if sys.version_info < (2, 7):
# Coverage.py isn't bug-for-bug compatible in the behavior of -m for
# Pythons < 2.7
self.skip("-m doesn't work the same < Python 2.7")
# https://bitbucket.org/ned/coveragepy/issue/207
self.make_file("package/__init__.py", "print('init')")
self.make_file("package/__main__.py", "print('main')")
out_cov = self.run_command("coverage run -m package")
out_py = self.run_command("python -m package")
self.assertMultiLineEqual(out_cov, out_py)
def test_fork(self):
if not hasattr(os, 'fork'):
self.skip("Can't test os.fork since it doesn't exist.")
self.make_file("fork.py", """\
import os
def child():
print('Child!')
def main():
ret = os.fork()
if ret == 0:
child()
else:
os.waitpid(ret, 0)
main()
""")
out = self.run_command("coverage run -p fork.py")
self.assertEqual(out, 'Child!\n')
self.assert_doesnt_exist(".coverage")
# After running the forking program, there should be two
# .coverage.machine.123 files.
self.assertEqual(self.number_of_data_files(), 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assertEqual(self.number_of_data_files(), 1)
# Read the coverage file and see that b_or_c.py has all 7 lines
# executed.
data = coverage.CoverageData()
data.read_file(".coverage")
self.assertEqual(data.line_counts()['fork.py'], 9)
def test_warnings(self):
self.make_file("hello.py", """\
import sys, os
print("Hello")
""")
out = self.run_command("coverage run --source=sys,xyzzy,quux hello.py")
self.assertIn("Hello\n", out)
self.assertIn(textwrap.dedent("""\
Coverage.py warning: Module sys has no Python source.
Coverage.py warning: Module xyzzy was never imported.
Coverage.py warning: Module quux was never imported.
Coverage.py warning: No data was collected.
"""), out)
def test_warnings_during_reporting(self):
# While fixing issue #224, the warnings were being printed far too
# often. Make sure they're not any more.
self.make_file("hello.py", """\
import sys, os, the_other
print("Hello")
""")
self.make_file("the_other.py", """\
print("What?")
""")
self.make_file(".coveragerc", """\
[run]
source =
.
xyzzy
""")
self.run_command("coverage run hello.py")
out = self.run_command("coverage html")
self.assertEqual(out.count("Module xyzzy was never imported."), 0)
def test_warnings_if_never_run(self):
out = self.run_command("coverage run i_dont_exist.py")
self.assertIn("No file to run: 'i_dont_exist.py'", out)
self.assertNotIn("warning", out)
self.assertNotIn("Exception", out)
out = self.run_command("coverage run -m no_such_module")
self.assertTrue(
("No module named no_such_module" in out) or
("No module named 'no_such_module'" in out)
)
self.assertNotIn("warning", out)
self.assertNotIn("Exception", out)
def test_warnings_trace_function_changed_with_threads(self):
# https://bitbucket.org/ned/coveragepy/issue/164
self.make_file("bug164.py", """\
import threading
import time
class MyThread (threading.Thread):
def run(self):
print("Hello")
thr = MyThread()
thr.start()
thr.join()
""")
out = self.run_command("coverage run --timid bug164.py")
self.assertIn("Hello\n", out)
self.assertNotIn("warning", out)
def test_warning_trace_function_changed(self):
self.make_file("settrace.py", """\
import sys
print("Hello")
sys.settrace(None)
print("Goodbye")
""")
out = self.run_command("coverage run --timid settrace.py")
self.assertIn("Hello\n", out)
self.assertIn("Goodbye\n", out)
self.assertIn("Trace function changed", out)
def test_note(self):
self.make_file(".coveragerc", """\
[run]
data_file = mydata.dat
note = These are musical notes: ♫𝅗𝅥♩
""")
self.make_file("simple.py", """print('hello')""")
self.run_command("coverage run simple.py")
data = CoverageData()
data.read_file("mydata.dat")
infos = data.run_infos()
self.assertEqual(len(infos), 1)
self.assertEqual(infos[0]['note'], u"These are musical notes: ♫𝅗𝅥♩")
def test_fullcoverage(self): # pragma: not covered
if env.PY2: # This doesn't work on Python 2.
self.skip("fullcoverage doesn't work on Python 2.")
# It only works with the C tracer, and if we aren't measuring ourselves.
if not env.C_TRACER or env.METACOV:
self.skip("fullcoverage only works with the C tracer.")
# fullcoverage is a trick to get stdlib modules measured from
# the very beginning of the process. Here we import os and
# then check how many lines are measured.
self.make_file("getenv.py", """\
import os
print("FOOEY == %s" % os.getenv("FOOEY"))
""")
fullcov = os.path.join(
os.path.dirname(coverage.__file__), "fullcoverage"
)
self.set_environ("FOOEY", "BOO")
self.set_environ("PYTHONPATH", fullcov)
out = self.run_command("python -m coverage run -L getenv.py")
self.assertEqual(out, "FOOEY == BOO\n")
data = coverage.CoverageData()
data.read_file(".coverage")
# The actual number of executed lines in os.py when it's
# imported is 120 or so. Just running os.getenv executes
# about 5.
self.assertGreater(data.line_counts()['os.py'], 50)
def test_deprecation_warnings(self):
# Test that coverage doesn't trigger deprecation warnings.
# https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
self.make_file("allok.py", """\
import warnings
warnings.simplefilter('default')
import coverage
print("No warnings!")
""")
out = self.run_command("python allok.py")
self.assertEqual(out, "No warnings!\n")
def test_run_twice(self):
# https://bitbucket.org/ned/coveragepy/issue/353/40a3-introduces-an-unexpected-third-case
self.make_file("foo.py", """\
def foo():
pass
""")
self.make_file("run_twice.py", """\
import coverage
for _ in [1, 2]:
inst = coverage.Coverage(source=['foo'])
inst.load()
inst.start()
import foo
inst.stop()
inst.combine()
inst.save()
""")
out = self.run_command("python run_twice.py")
self.assertEqual(
out,
"Coverage.py warning: Module foo was previously imported, but not measured.\n"
)
class AliasedCommandTest(CoverageTest):
"""Tests of the version-specific command aliases."""
run_in_temp_dir = False
def test_major_version_works(self):
# "coverage2" works on py2
cmd = "coverage%d" % sys.version_info[0]
out = self.run_command(cmd)
self.assertIn("Code coverage for Python", out)
def test_wrong_alias_doesnt_work(self):
# "coverage3" doesn't work on py2
badcmd = "coverage%d" % (5 - sys.version_info[0])
out = self.run_command(badcmd)
self.assertNotIn("Code coverage for Python", out)
def test_specific_alias_works(self):
# "coverage-2.7" works on py2.7
cmd = "coverage-%d.%d" % sys.version_info[:2]
out = self.run_command(cmd)
self.assertIn("Code coverage for Python", out)
class PydocTest(CoverageTest):
"""Test that pydoc can get our information."""
run_in_temp_dir = False
def assert_pydoc_ok(self, name, thing):
"""Check that pydoc of `name` finds the docstring from `thing`."""
# Run pydoc.
out = self.run_command("python -m pydoc " + name)
# It should say "Help on..", and not have a traceback
self.assert_starts_with(out, "Help on ")
self.assertNotIn("Traceback", out)
# All of the lines in the docstring should be there somewhere.
for line in thing.__doc__.splitlines():
self.assertIn(line.strip(), out)
def test_pydoc_coverage(self):
self.assert_pydoc_ok("coverage", coverage)
def test_pydoc_coverage_coverage(self):
self.assert_pydoc_ok("coverage.Coverage", coverage.Coverage)
class FailUnderTest(CoverageTest):
"""Tests of the --fail-under switch."""
def setUp(self):
super(FailUnderTest, self).setUp()
self.make_file("forty_two_plus.py", """\
# I have 42.857% (3/7) coverage!
a = 1
b = 2
if a > 3:
b = 4
c = 5
d = 6
e = 7
""")
st, _ = self.run_command_status("coverage run forty_two_plus.py")
self.assertEqual(st, 0)
st, out = self.run_command_status("coverage report")
self.assertEqual(st, 0)
self.assertEqual(
self.last_line_squeezed(out),
"forty_two_plus.py 7 4 43%"
)
def test_report(self):
st, _ = self.run_command_status("coverage report --fail-under=42")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage report --fail-under=43")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage report --fail-under=44")
self.assertEqual(st, 2)
def test_html_report(self):
st, _ = self.run_command_status("coverage html --fail-under=42")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage html --fail-under=43")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage html --fail-under=44")
self.assertEqual(st, 2)
def test_xml_report(self):
st, _ = self.run_command_status("coverage xml --fail-under=42")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage xml --fail-under=43")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage xml --fail-under=44")
self.assertEqual(st, 2)
def test_fail_under_in_config(self):
self.make_file(".coveragerc", "[report]\nfail_under = 43\n")
st, _ = self.run_command_status("coverage report")
self.assertEqual(st, 0)
self.make_file(".coveragerc", "[report]\nfail_under = 44\n")
st, _ = self.run_command_status("coverage report")
self.assertEqual(st, 2)
class FailUnderNoFilesTest(CoverageTest):
"""Test that nothing to report results in an error exit status."""
def setUp(self):
super(FailUnderNoFilesTest, self).setUp()
self.make_file(".coveragerc", "[report]\nfail_under = 99\n")
def test_report(self):
st, out = self.run_command_status("coverage report")
self.assertIn('No data to report.', out)
self.assertEqual(st, 1)
def test_xml(self):
st, out = self.run_command_status("coverage xml")
self.assertIn('No data to report.', out)
self.assertEqual(st, 1)
def test_html(self):
st, out = self.run_command_status("coverage html")
self.assertIn('No data to report.', out)
self.assertEqual(st, 1)
class FailUnderEmptyFilesTest(CoverageTest):
"""Test that empty files produce the proper fail_under exit status."""
def setUp(self):
super(FailUnderEmptyFilesTest, self).setUp()
self.make_file(".coveragerc", "[report]\nfail_under = 99\n")
self.make_file("empty.py", "")
st, _ = self.run_command_status("coverage run empty.py")
self.assertEqual(st, 0)
def test_report(self):
st, _ = self.run_command_status("coverage report")
self.assertEqual(st, 2)
def test_xml(self):
st, _ = self.run_command_status("coverage xml")
self.assertEqual(st, 2)
def test_html(self):
st, _ = self.run_command_status("coverage html")
self.assertEqual(st, 2)
def possible_pth_dirs():
"""Produce a sequence of directories for trying to write .pth files."""
    # First look through sys.path; if we find a .pth file, then it's a good
    # place to put ours.
for d in sys.path:
g = glob.glob(os.path.join(d, "*.pth"))
if g:
yield d
# If we're still looking, then try the Python library directory.
# https://bitbucket.org/ned/coveragepy/issue/339/pth-test-malfunctions
import distutils.sysconfig # pylint: disable=import-error
yield distutils.sysconfig.get_python_lib()
class ProcessCoverageMixin(object):
"""Set up a .pth file to coverage-measure all sub-processes."""
def setUp(self):
super(ProcessCoverageMixin, self).setUp()
# Find a place to put a .pth file.
pth_contents = "import coverage; coverage.process_startup()\n"
for pth_dir in possible_pth_dirs(): # pragma: part covered
pth_path = os.path.join(pth_dir, "subcover.pth")
with open(pth_path, "w") as pth:
try:
pth.write(pth_contents)
self.pth_path = pth_path
break
except (IOError, OSError): # pragma: not covered
pass
else: # pragma: not covered
raise Exception("Couldn't find a place for the .pth file")
self.addCleanup(os.remove, self.pth_path)
class ProcessStartupTest(ProcessCoverageMixin, CoverageTest):
"""Test that we can measure coverage in sub-processes."""
def test_subprocess_with_pth_files(self): # pragma: not covered
if env.METACOV:
self.skip("Can't test sub-process pth file suppport during metacoverage")
# Main will run sub.py
self.make_file("main.py", """\
import os, os.path, sys
ex = os.path.basename(sys.executable)
os.system(ex + " sub.py")
""")
# sub.py will write a few lines.
self.make_file("sub.py", """\
with open("out.txt", "w") as f:
f.write("Hello, world!\\n")
""")
self.make_file("coverage.ini", """\
[run]
data_file = .mycovdata
""")
self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
import main # pylint: disable=import-error,unused-variable
with open("out.txt") as f:
self.assertEqual(f.read(), "Hello, world!\n")
# Read the data from .coverage
self.assert_exists(".mycovdata")
data = coverage.CoverageData()
data.read_file(".mycovdata")
self.assertEqual(data.line_counts()['sub.py'], 2)
class ProcessStartupWithSourceTest(ProcessCoverageMixin, CoverageTest):
"""Show that we can configure {[run]source} during process-level coverage.
There are three interesting variables, for a total of eight tests:
1. -m versus a simple script argument (for example, `python myscript`),
2. filtering for the top-level (main.py) or second-level (sub.py)
module, and
3. whether the files are in a package or not.
"""
def assert_pth_and_source_work_together(
self, dashm, package, source
): # pragma: not covered
"""Run the test for a particular combination of factors.
The arguments are all strings:
* `dashm`: Either "" (run the program as a file) or "-m" (run the
program as a module).
* `package`: Either "" (put the source at the top level) or a
package name to use to hold the source.
* `source`: Either "main" or "sub", which file to use as the
``--source`` argument.
"""
if env.METACOV:
self.skip("Can't test sub-process pth file suppport during metacoverage")
def fullname(modname):
"""What is the full module name for `modname` for this test?"""
if package and dashm:
return '.'.join((package, modname))
else:
return modname
def path(basename):
"""Where should `basename` be created for this test?"""
return os.path.join(package, basename)
# Main will run sub.py.
self.make_file(path("main.py"), """\
import %s
if True: pass
""" % fullname('sub'))
if package:
self.make_file(path("__init__.py"), "")
# sub.py will write a few lines.
self.make_file(path("sub.py"), """\
with open("out.txt", "w") as f:
f.write("Hello, world!")
""")
self.make_file("coverage.ini", """\
[run]
source = %s
""" % fullname(source))
self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
if dashm:
cmd = "python -m %s" % fullname('main')
else:
cmd = "python %s" % path('main.py')
self.run_command(cmd)
with open("out.txt") as f:
self.assertEqual(f.read(), "Hello, world!")
# Read the data from .coverage
self.assert_exists(".coverage")
data = coverage.CoverageData()
data.read_file(".coverage")
summary = data.line_counts()
print(summary)
self.assertEqual(summary[source + '.py'], 2)
self.assertEqual(len(summary), 1)
def test_dashm_main(self):
self.assert_pth_and_source_work_together('-m', '', 'main')
def test_script_main(self):
self.assert_pth_and_source_work_together('', '', 'main')
def test_dashm_sub(self):
self.assert_pth_and_source_work_together('-m', '', 'sub')
def test_script_sub(self):
self.assert_pth_and_source_work_together('', '', 'sub')
def test_dashm_pkg_main(self):
self.assert_pth_and_source_work_together('-m', 'pkg', 'main')
def test_script_pkg_main(self):
self.assert_pth_and_source_work_together('', 'pkg', 'main')
def test_dashm_pkg_sub(self):
self.assert_pth_and_source_work_together('-m', 'pkg', 'sub')
def test_script_pkg_sub(self):
self.assert_pth_and_source_work_together('', 'pkg', 'sub')
def remove_matching_lines(text, pat):
"""Return `text` with all lines matching `pat` removed."""
lines = [l for l in text.splitlines(True) if not re.search(pat, l)]
return "".join(lines)
| apache-2.0 | -291,478,364,422,682,300 | 35.047389 | 97 | 0.570708 | false |
Daniel-CA/odoo-addons | stock_lot_lifespan/models/stock_production_lot.py | 1 | 1228 | # -*- coding: utf-8 -*-
# Copyright 2017 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
from dateutil.relativedelta import relativedelta
class StockProductionLot(models.Model):
_inherit = 'stock.production.lot'
@api.onchange('mrp_date', 'life_date')
@api.multi
def onchange_mrp_life_date(self):
self.ensure_one()
if not self.mrp_date or not self.life_date:
return {}
stock_config_model = self.env['stock.config.settings']
mrp_date = fields.Date.from_string(self.mrp_date)
life_date = fields.Date.from_string(self.life_date)
lifespan = (life_date - mrp_date).days
vals = stock_config_model.get_default_stock_lot_percentage([])
variation1 = lifespan * vals.get('stock_lot_percentage1', 0) / 100
variation2 = lifespan * vals.get('stock_lot_percentage2', 0) / 100
variation3 = lifespan * vals.get('stock_lot_percentage3', 0) / 100
self.alert_date = mrp_date + relativedelta(days=variation1)
self.removal_date = mrp_date + relativedelta(days=variation2)
self.use_date = mrp_date + relativedelta(days=variation3)
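    # Worked example (illustrative values): with mrp_date = 2017-01-01 and
    # life_date = 2017-01-31 the lifespan is 30 days; with configured
    # percentages of 50/80/90 the onchange sets alert_date = mrp_date + 15d,
    # removal_date = mrp_date + 24d and use_date = mrp_date + 27d.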
| agpl-3.0 | -2,681,940,370,119,119,000 | 41.344828 | 74 | 0.665309 | false |
ExpHP/crank | plotutil.py | 1 | 1861 |
from colorsys import hls_to_rgb
import numpy as np
import math
class WavePlotter:
def plot(self, ax, psigrid, evo):
''' Produces a plot on ax. Returns the produced drawable, and records it for later modification through update(). '''
raise NotImplementedError
def update(self, psigrid, evo):
''' Updates the plot from the most recent call to plot() with new data, and returns a list of drawables to blit. '''
raise NotImplementedError
def clearAxisLabels(ax):
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
#---------------
class ProbPlotter(WavePlotter):
def __init__(self):
self.img = None
def plot(self, ax, psigrid, evo):
clearAxisLabels(ax)
ax.set_title('Probability')
prob = np.power(np.absolute(psigrid),2)
self.img = ax.imshow(prob, interpolation='nearest')
return self.img
def update(self, psigrid, evo):
if self.img is None:
raise RuntimeError("No existing plot to update")
prob = np.power(np.absolute(psigrid),2)
self.img.set_array(prob)
self.img.set_clim(0., prob.max()**0.97)
return [self.img]
#---------------
class PhasePlotter(WavePlotter):
def __init__(self):
self.img = None
def plot(self, ax, psigrid, evo):
clearAxisLabels(ax)
ax.set_title('Phase')
self.img = ax.imshow(getPhaseRGB(psigrid), interpolation='nearest')
return self.img
def update(self, psigrid, evo):
if self.img is None:
raise RuntimeError("No existing plot to update")
self.img.set_array(getPhaseRGB(psigrid))
return [self.img]
def getPhaseRGB(z):
h = (np.angle(z) + math.pi) / (2 * math.pi) + 0.5
l = 1.0 - 1.0/(1.0 + np.abs(z)**0.3)
l = l**2.
l /= l.max()
l *= 0.60
s = 0.8
c = np.vectorize(hls_to_rgb) (h,l,s) # --> tuple
c = np.array(c) # --> array of (3,n,m) shape, but need (n,m,3)
c = c.transpose(1,2,0)
return c
#----------------
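if __name__ == '__main__':
    # Minimal usage sketch (illustrative; assumes matplotlib is installed).
    # Plots the probability density and phase of a toy Gaussian wave packet.
    # The `evo` argument is unused by these two plotters, so None is passed.
    import matplotlib.pyplot as plt
    xs, ys = np.meshgrid(np.linspace(-3, 3, 128), np.linspace(-3, 3, 128))
    psi = np.exp(-(xs**2 + ys**2)) * np.exp(1j * 4.0 * xs)  # packet with momentum
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ProbPlotter().plot(ax1, psi, None)
    PhasePlotter().plot(ax2, psi, None)
    plt.show()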
| mit | -7,844,894,315,189,843,000 | 22.858974 | 120 | 0.648576 | false |
gnumdk/lollypop | lollypop/helper_passwords.py | 1 | 7833 | # Copyright (c) 2017 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
gi.require_version('Secret', '1')
from gi.repository import Secret, GLib
from lollypop.utils import debug
class PasswordsHelper:
"""
Simpler helper for Secret
"""
def __init__(self):
"""
Init helper
"""
self.__secret = None
Secret.Service.get(Secret.ServiceFlags.NONE, None,
self.__on_get_secret)
def get(self, service, callback, *args):
"""
Call function
@param service as str
@param callback as function
@param args
"""
try:
self.__wait_for_secret(self.get, service, callback, *args)
SecretSchema = {
"service": Secret.SchemaAttributeType.STRING
}
SecretAttributes = {
"service": service
}
schema = Secret.Schema.new("org.gnome.Lollypop",
Secret.SchemaFlags.NONE,
SecretSchema)
self.__secret.search(schema, SecretAttributes,
Secret.SearchFlags.ALL,
None,
self.__on_secret_search,
service,
callback,
*args)
except Exception as e:
debug("PasswordsHelper::get(): %s" % e)
def store(self, service, login, password, callback, *args):
"""
Store password
@param service as str
@param login as str
@param password as str
@param callback as function
"""
try:
self.__wait_for_secret(self.store,
service,
login,
password,
callback,
*args)
schema_string = "org.gnome.Lollypop: %s@%s" % (service, login)
SecretSchema = {
"service": Secret.SchemaAttributeType.STRING,
"login": Secret.SchemaAttributeType.STRING,
}
SecretAttributes = {
"service": service,
"login": login
}
schema = Secret.Schema.new("org.gnome.Lollypop",
Secret.SchemaFlags.NONE,
SecretSchema)
Secret.password_store(schema, SecretAttributes,
Secret.COLLECTION_DEFAULT,
schema_string,
password,
None,
callback,
*args)
except Exception as e:
debug("PasswordsHelper::store(): %s" % e)
def clear(self, service, callback=None, *args):
"""
Clear password
@param service as str
@param callback as function
"""
try:
self.__wait_for_secret(self.clear, service, callback, *args)
SecretSchema = {
"service": Secret.SchemaAttributeType.STRING
}
SecretAttributes = {
"service": service
}
schema = Secret.Schema.new("org.gnome.Lollypop",
Secret.SchemaFlags.NONE,
SecretSchema)
self.__secret.search(schema,
SecretAttributes,
Secret.SearchFlags.ALL,
None,
self.__on_clear_search,
callback,
*args)
except Exception as e:
debug("PasswordsHelper::clear(): %s" % e)
#######################
# PRIVATE #
#######################
def __wait_for_secret(self, call, *args):
"""
Wait for secret
@param call as function to call
@param args
@raise exception if waiting
"""
# Wait for secret
if self.__secret is None:
GLib.timeout_add(250, call, *args)
if self.__secret in [None, -1]:
raise Exception("Waiting Secret service")
def __on_clear_search(self, source, result, callback=None, *args):
"""
Clear passwords
@param source as GObject.Object
@param result as Gio.AsyncResult
"""
try:
if result is not None:
items = source.search_finish(result)
for item in items:
item.delete(None, None)
if callback is not None:
callback(*args)
except Exception as e:
debug("PasswordsHelper::__on_clear_search(): %s" % e)
def __on_load_secret(self, source, result, service, callback, *args):
"""
Set userservice/password input
@param source as GObject.Object
@param result as Gio.AsyncResult
@param service as str
@param index as int
@param count as int
@param callback as function
@param args
"""
secret = source.get_secret()
if secret is not None:
callback(source.get_attributes(),
secret.get().decode('utf-8'),
service,
*args)
else:
callback(None, None, service, *args)
def __on_secret_search(self, source, result, service, callback, *args):
"""
Set userservice/password input
@param source as GObject.Object
@param result as Gio.AsyncResult
@param service as str/None
@param callback as function
@param args
"""
try:
if result is not None:
items = self.__secret.search_finish(result)
for item in items:
item.load_secret(None,
self.__on_load_secret,
service,
callback,
*args)
if not items:
callback(None, None, service, *args)
else:
callback(None, None, service, *args)
except Exception as e:
debug("PasswordsHelper::__on_secret_search(): %s" % e)
callback(None, None, service, *args)
def __on_get_secret(self, source, result):
"""
Store secret proxy
@param source as GObject.Object
@param result as Gio.AsyncResult
"""
try:
self.__secret = Secret.Service.get_finish(result)
except Exception as e:
self.__secret = -1
debug("PasswordsHelper::__on_get_secret(): %s" % e)
| gpl-3.0 | 2,650,566,165,039,561,000 | 35.774648 | 75 | 0.469424 | false |
KellyChan/Python | javascript/backbone/backbone-templates/backbone-fileupload/fileupload/manage.py | 3 | 3855 | #!/usr/bin/env python
"""
Usage: manage.py {lms|cms} [--settings env] ...
Run django management commands. Because edx-platform contains multiple django projects,
the first argument specifies which project to run (cms [Studio] or lms [Learning Management System]).
By default, those systems run in with a settings file appropriate for development. However,
by passing the --settings flag, you can specify what environment specific settings file to use.
Any arguments not understood by this manage.py will be passed to django-admin.py
"""
from safe_lxml import defuse_xml_libs
defuse_xml_libs()
import os
import sys
import importlib
from argparse import ArgumentParser
import contracts
def parse_args():
"""Parse edx specific arguments to manage.py"""
parser = ArgumentParser()
subparsers = parser.add_subparsers(title='system', description='edX service to run')
lms = subparsers.add_parser(
'lms',
help='Learning management System',
add_help=False,
usage='%(prog)s [options] ...'
)
    lms.add_argument('-h', '--help', action='store_true', help='show this help message and exit')
lms.add_argument(
'--settings',
help="Which django settings module to use under lms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
"environment variable will be used if it is set, otherwise it will default to lms.envs.dev"
)
    lms.add_argument(
        '--service-variant',
        choices=['lms', 'lms-xml', 'lms-preview'],
        default='lms',
        help='Which variant of the LMS service to run'
    )
lms.add_argument(
'--contracts',
action='store_true',
default=False,
help='Turn on pycontracts for local development'
)
lms.set_defaults(
help_string=lms.format_help(),
settings_base='lms/envs',
default_settings='lms.envs.dev',
startup='lms.startup',
)
cms = subparsers.add_parser(
'cms',
help='Studio',
add_help=False,
usage='%(prog)s [options] ...'
)
cms.add_argument(
'--settings',
help="Which django settings module to use under cms.envs. If not provided, the DJANGO_SETTINGS_MODULE "
"environment variable will be used if it is set, otherwise it will default to cms.envs.dev"
)
cms.add_argument(
'-h',
'--help',
action='store_true',
help='show this help message and exit'
)
cms.add_argument(
'--contracts',
action='store_true',
default=False,
help='Turn on pycontracts for local development'
)
cms.set_defaults(
help_string=cms.format_help(),
settings_base='cms/envs',
default_settings='cms.envs.dev',
service_variant='cms',
startup='cms.startup',
)
edx_args, django_args = parser.parse_known_args()
if edx_args.help:
print "edX:"
print edx_args.help_string
return edx_args, django_args
if __name__ == "__main__":
edx_args, django_args = parse_args()
if edx_args.settings:
os.environ["DJANGO_SETTINGS_MODULE"] = edx_args.settings_base.replace('/', '.') + "." + edx_args.settings
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", edx_args.default_settings)
os.environ.setdefault("SERVICE_VARIANT", edx_args.service_variant)
    # Contracts are off by default; they can be enabled with the
    # ENABLE_CONTRACTS environment variable or the '--contracts' argument.
    enable_contracts = os.environ.get('ENABLE_CONTRACTS', False)
    if not enable_contracts and not edx_args.contracts:
        contracts.disable_all()
if edx_args.help:
print "Django:"
# This will trigger django-admin.py to print out its help
django_args.append('--help')
startup = importlib.import_module(edx_args.startup)
startup.run()
from django.core.management import execute_from_command_line
execute_from_command_line([sys.argv[0]] + django_args)
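#
# Example invocations (illustrative):
#
#     python manage.py lms --settings dev runserver
#     python manage.py cms --settings dev migrate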
| mit | 5,578,937,510,015,899,000 | 27.984962 | 113 | 0.638392 | false |
LiveZenLK/CeygateERP | addons/account/wizard/account_invoice_refund.py | 23 | 6463 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.tools.safe_eval import safe_eval as eval
from openerp.exceptions import UserError
class AccountInvoiceRefund(models.TransientModel):
"""Refunds invoice"""
_name = "account.invoice.refund"
_description = "Invoice Refund"
@api.model
def _get_reason(self):
context = dict(self._context or {})
active_id = context.get('active_id', False)
if active_id:
inv = self.env['account.invoice'].browse(active_id)
return inv.name
return ''
date_invoice = fields.Date(string='Refund Date', default=fields.Date.context_today, required=True)
date = fields.Date(string='Accounting Date')
description = fields.Char(string='Reason', required=True, default=_get_reason)
refund_only = fields.Boolean(string='Technical field to hide filter_refund in case invoice is partially paid', compute='_get_refund_only')
filter_refund = fields.Selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'), ('modify', 'Modify: create refund, reconcile and create a new draft invoice')],
default='refund', string='Refund Method', required=True, help='Refund base on this type. You can not Modify and Cancel if the invoice is already reconciled')
@api.depends('date_invoice')
@api.one
def _get_refund_only(self):
invoice_id = self.env['account.invoice'].browse(self._context.get('active_id',False))
if len(invoice_id.payment_move_line_ids) != 0 and invoice_id.state != 'paid':
self.refund_only = True
else:
self.refund_only = False
@api.multi
def compute_refund(self, mode='refund'):
inv_obj = self.env['account.invoice']
inv_tax_obj = self.env['account.invoice.tax']
inv_line_obj = self.env['account.invoice.line']
context = dict(self._context or {})
xml_id = False
for form in self:
created_inv = []
date = False
description = False
for inv in inv_obj.browse(context.get('active_ids')):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise UserError(_('Cannot refund draft/proforma/cancelled invoice.'))
if inv.reconciled and mode in ('cancel', 'modify'):
raise UserError(_('Cannot refund invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.'))
date = form.date or False
description = form.description or inv.name
refund = inv.refund(form.date_invoice, date, description, inv.journal_id.id)
refund.compute_taxes()
created_inv.append(refund.id)
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_ids
to_reconcile_ids = {}
to_reconcile_lines = self.env['account.move.line']
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_lines += line
to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
if line.reconciled:
line.remove_move_reconcile()
refund.signal_workflow('invoice_open')
for tmpline in refund.move_id.line_ids:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_lines += tmpline
to_reconcile_lines.reconcile()
if mode == 'modify':
invoice = inv.read(
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term_id', 'account_id',
'currency_id', 'invoice_line_ids', 'tax_line_ids',
'journal_id', 'date'])
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(invoice['invoice_line_ids'])
invoice_lines = inv_obj._refund_cleanup_lines(invoice_lines)
tax_lines = inv_tax_obj.browse(invoice['tax_line_ids'])
tax_lines = inv_obj._refund_cleanup_lines(tax_lines)
invoice.update({
'type': inv.type,
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line_ids': invoice_lines,
'tax_line_ids': tax_lines,
'date': date,
'name': description
})
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term_id', 'journal_id'):
invoice[field] = invoice[field] and invoice[field][0]
inv_refund = inv_obj.create(invoice)
if inv_refund.payment_term_id.id:
inv_refund._onchange_payment_term_date_invoice()
created_inv.append(inv_refund.id)
xml_id = (inv.type in ['out_refund', 'out_invoice']) and 'action_invoice_tree1' or \
(inv.type in ['in_refund', 'in_invoice']) and 'action_invoice_tree2'
# Put the reason in the chatter
subject = _("Invoice refund")
body = description
refund.message_post(body=body, subject=subject)
if xml_id:
result = self.env.ref('account.%s' % (xml_id)).read()[0]
invoice_domain = eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
return True
@api.multi
def invoice_refund(self):
data_refund = self.read(['filter_refund'])[0]['filter_refund']
return self.compute_refund(data_refund)
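#
# Usage sketch (illustrative; record values are examples). From an Odoo
# shell, refund an open invoice and reconcile it against the refund in
# one step:
#
#     wizard = env['account.invoice.refund'].with_context(
#         active_ids=invoice.ids, active_id=invoice.id).create({
#             'description': 'Returned goods',
#             'filter_refund': 'cancel',
#         })
#     wizard.invoice_refund()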
| gpl-3.0 | 3,315,044,040,145,152,000 | 49.889764 | 205 | 0.520037 | false |
andersk/zulip | tools/lib/test_script.py | 5 | 4370 | import glob
import os
import subprocess
import sys
from argparse import ArgumentParser
from distutils.version import LooseVersion
from typing import Iterable, List, Optional, Tuple
from scripts.lib.zulip_tools import get_dev_uuid_var_path
from version import PROVISION_VERSION
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def get_major_version(v: str) -> int:
return int(v.split(".")[0])
def get_version_file() -> str:
uuid_var_path = get_dev_uuid_var_path()
return os.path.join(uuid_var_path, "provision_version")
PREAMBLE = """
Before we run tests, we make sure your provisioning version
is correct by looking at var/provision_version, which is at
version {}, and we compare it to the version in source
control (version.py), which is {}.
"""
def preamble(version: str) -> str:
text = PREAMBLE.format(version, PROVISION_VERSION)
text += "\n"
return text
NEED_TO_DOWNGRADE = """
It looks like you checked out a branch that expects an older
version of dependencies than the version you provisioned last.
This may be ok, but it's likely that you either want to rebase
your branch on top of upstream/master or re-provision your VM.
Do this: `./tools/provision`
"""
NEED_TO_UPGRADE = """
It looks like you checked out a branch that has added
dependencies beyond what you last provisioned. Your command
is likely to fail until you add dependencies by provisioning.
Do this: `./tools/provision`
"""
def get_provisioning_status() -> Tuple[bool, Optional[str]]:
version_file = get_version_file()
if not os.path.exists(version_file):
# If the developer doesn't have a version_file written by
# a previous provision, then we don't do any safety checks
# here on the assumption that the developer is managing
# their own dependencies and not running provision.
return True, None
with open(version_file) as f:
version = f.read().strip()
# Normal path for people that provision--we're all good!
if version == PROVISION_VERSION:
return True, None
# We may be more provisioned than the branch we just moved to. As
# long as the major version hasn't changed, then we should be ok.
if LooseVersion(version) > LooseVersion(PROVISION_VERSION):
if get_major_version(version) == get_major_version(PROVISION_VERSION):
return True, None
else:
return False, preamble(version) + NEED_TO_DOWNGRADE
return False, preamble(version) + NEED_TO_UPGRADE
def assert_provisioning_status_ok(skip_provision_check: bool) -> None:
if not skip_provision_check:
ok, msg = get_provisioning_status()
if not ok:
print(msg)
print(
"If you really know what you are doing, use --skip-provision-check to run anyway."
)
sys.exit(1)
def add_provision_check_override_param(parser: ArgumentParser) -> None:
"""
Registers --skip-provision-check argument to be used with various commands/tests in our tools.
"""
parser.add_argument(
"--skip-provision-check",
action="store_true",
help="Skip check that provision has been run; useful to save time if you know the dependency changes are not relevant to this command and will not cause it to fail",
)
def find_js_test_files(test_dir: str, files: Iterable[str]) -> List[str]:
test_files = []
for file in files:
for file_name in os.listdir(test_dir):
if file_name.startswith(file):
file = file_name
break
if not os.path.exists(file):
file = os.path.join(test_dir, file)
test_files.append(os.path.abspath(file))
if not test_files:
test_files = sorted(
glob.glob(os.path.join(test_dir, "*.ts")) + glob.glob(os.path.join(test_dir, "*.js"))
)
return test_files
def prepare_puppeteer_run(is_firefox: bool = False) -> None:
os.chdir(ZULIP_PATH)
# This will determine if the browser will be firefox or chrome.
os.environ["PUPPETEER_PRODUCT"] = "firefox" if is_firefox else "chrome"
subprocess.check_call(["node", "node_modules/puppeteer/install.js"])
os.makedirs("var/puppeteer", exist_ok=True)
for f in glob.glob("var/puppeteer/puppeteer-failure*.png"):
os.remove(f)
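if __name__ == "__main__":
    # Minimal usage sketch (illustrative): wire the provision check into an
    # ad-hoc tool's CLI the same way Zulip's test runners use these helpers.
    demo_parser = ArgumentParser(description="provision check demo")
    add_provision_check_override_param(demo_parser)
    demo_args = demo_parser.parse_args()
    assert_provisioning_status_ok(demo_args.skip_provision_check)
    print("provision state OK")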
| apache-2.0 | 4,889,622,500,786,676,000 | 32.358779 | 173 | 0.671396 | false |
testalt/electrum-dgc | gui/qt/qrtextedit.py | 1 | 1725 | from electrum_dgc.i18n import _
from PyQt4.QtGui import *
from PyQt4.QtCore import *
class QRTextEdit(QPlainTextEdit):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.button = QToolButton(self)
self.button.setIcon(QIcon(":icons/qrcode.png"))
self.button.setStyleSheet("QToolButton { border: none; padding: 0px; }")
self.button.setVisible(True)
self.button.clicked.connect(lambda: self.qr_show() if self.isReadOnly() else self.qr_input())
self.setText = self.setPlainText
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
sz = self.button.sizeHint()
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
self.button.move(self.rect().right() - frameWidth - sz.width(),
(self.rect().bottom() - frameWidth - sz.height()))
return o
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
if self.isReadOnly():
m.addAction(_("Show as QR code"), self.qr_show)
else:
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
def qr_show(self):
from qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = unicode(self.toPlainText())
QRDialog(s).exec_()
def qr_input(self):
from electrum_dgc import qrscanner
try:
data = qrscanner.scan_qr(self.win.config)
except BaseException, e:
            QMessageBox.warning(self.win, _('Error'), str(e), _('OK'))
return
if type(data) != str:
return
self.setText(data)
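if __name__ == '__main__':
    # Minimal demo (illustrative): show a read-only QRTextEdit whose context
    # menu offers "Show as QR code".
    import sys
    app = QApplication(sys.argv)
    edit = QRTextEdit("hello")
    edit.setReadOnly(True)
    edit.show()
    sys.exit(app.exec_())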
| gpl-3.0 | -3,892,015,273,093,110,300 | 34.204082 | 101 | 0.588986 | false |
MyRobotLab/pyrobotlab | home/scruffy-bob/faceidentification.py | 1 | 36627 | #
# This file will NOT run under MRL. I've uploaded it to give an example
# in Python of a face recognition algorithm. It has the ability to normalize
# training pictures (including resizing, tilting, and shifting), so all the
# training pictures are aligned to the others, making identification easier.
#
# On recognition, it also applies a mask (essentially an oval) to attempt to
# remove as much of the background as possible, leaving only the face.
#
# Purpose: Since there has been some discussion of face identification
# (not just face detection, but real identification), I thought I’d share what
# I’ve been working on. I started this before I found out about myrobotlab,
# so it was originally written to be a standalone program, but I hope to incorporate
# the functionality into MRL in the near future. I figured I’d get the existing code
# and explanation out there, so others with better programming skills can also use this
# as a reference point if they want to build off of what I’ve already put together.
#
# Caveat: While my degrees are in Computer Science, it’s been 20 years since I did
# any real programming, and I’ve just recently learning Python, so some of the things
# may not be as efficient as they could be.
#
# References: My program incorporates a lot of things that have already been done before.
# I don’t claim ownership of ANYTHING that hasn’t been done before, although I haven’t seen
# all of the elements put together quite like this before. Most of the implementation is
# mine, but I’ve reused code and examples from the following places:
#
# a. http://docs.opencv.org/2.4.8/modules/contrib/doc/facerec/index.html
# b. https://github.com/bytefish/facerec
# c. http://eyalarubas.com/face-detection-and-recognition.html
# d. https://code.google.com/archive/p/visual-control/downloads
#
# Some of the features that I’ve incorporated into my program:
# a. Face Identification using Linear Discriminant Analysis (LDA) using FisherFaces.
# I started using PCA - Principal Component Analysis (Eigenfaces) but found FisherFaces
# to be superior for the real-world environment I was going to be operating in.
#
# b. Face normalization during the learning phase. The program will not only detect a
# face, but it will also attempt to detect the components of the face like the eyes,
# nose and mouth. If those can be captured, the incoming image can be rescaled so all
# of the stored database pictures have the eyes and mouth in almost exactly the same position.
#
# Also, if we know the relative position of the eyes and mouth, I can account for tilted
# or skewed images as well in my normalization process. This makes recognition easier,
# regardless of the size or orientation of the images. I choose to use an affine transformation
# for this, but you can do it anyway you wish.
#
# c. Image masking. One of the big things that makes face identification difficult is the
# noise in the picture (all of the stuff that’s not part of the face). In my program, when
# creating my models for image comparison, I mask all of the pictures using essentially an oval that
# eliminates most of the background, leaving only the main part of the face.
#
# d. Preprocessing: The program optionally uses TanTriggs preprocessing to reduce variance by
# lighting differences between images. I’m still playing with this, so I can’t really tell
# if it’s better or not.
#
# e. Variable thresholding: The program has the ability to scale the threshold indicators.
# In a controlled image environment (where pictures always have the same size, orientation and
# lighting), you can tighten the threshold where the program will positively identify an image.
# In a less controlled environment, you may have to loosen the constraints for identification.
# The tradeoff in loosening the constraints is a higher “false positive” rate in which you mis-identifiy
# a face. The alternative if the constraints are too high is that you’ll miss identifying a face
# that you should identify (a false negative).
#
# f. Speech synthesis. Since I developed this before I discovered MRL, I was working on my own
# speech synthesis. If it identifies a face, it will articulate that identification. It will
# also withhold repeating that same name for a while, so it doesn’t bug you to death if you’re
# watching it for a long time.
#
# g. Learning mode: The program can learn new people from a webcam.
#
# h. GUI that includes webcam image, separate streaming windows for faces detected and faces identified,
# real-time adjustments.
#
# i. The Python program currently runs under Windows 10 with Python 2.7. I have not tried it in
# any other environment. I haven't included any of the Haar cascades, since they're easy to find on
# the internet
#
from facerec.feature import Fisherfaces
from facerec.preprocessing import TanTriggsPreprocessing
from facerec.operators import ChainOperator
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
import numpy as np
from PIL import Image
import sys, os
import time
import cv2
import multiprocessing
import time
import pyttsx
import random
#
# These are the identifier thresholds used to determine when a picture is 100% verified.
# For Fisherface processing, the threshold is arbitrarily set to 500, if we're using the Tan Triggs pre-processor,
# divide the threshold by 2.5 (e.g. 200 default). These can still be changed, but these are the defaults
TANTRIGGS_SCALER = 2.5
IDENTIFIED_THRESHOLD = 500
current_threshold = IDENTIFIED_THRESHOLD/TANTRIGGS_SCALER
#
# Maximum number of pictures to use for any particular subject. If a subject's
# directory holds more, read_images() takes an evenly spaced sample of them.
MAX_PER_SUBJECT = 10
current_max_subjects = MAX_PER_SUBJECT
#
# Various boolean flags used to turn various modes on and off
#
voice_on = True
details_on = True
bestguess_on = False
preprocess_on = True
debug_on = True
onoff = ["Off", "On"]
#
# Speech greetings
# lastgreeting is used to control how often we say something
# GREETINGTHRESHOLD is the number of seconds between utterances
#
GREETINGS = ["I spy with my little eye,",
"How have you been",
"How do you do",
"It's good to meet you",
"It's nice to meet you",
"Look, it's",
"Hey",
"Well, hello",
"What have you been up to",
"Hi there",
"Good to see you",
"It's been too long",
"What's new",
"How are you",
"I think I see",
"Is that",
"I'm pretty sure that is",
"That must be",
"Do I see",
"Hello",
"What's up",
"You're looking good",
"Howdy",
"Good afternoon",
"How is it going?",
"How are you doing?",
"What's up?",
"What's new?",
"Nice to see you" ]
GREETINGTHRESHOLD = 10 # The voice won't speak any faster than once every 10 seconds
GREETINGREPEAT = 60 # The voice won't repeat any person's name faster than once every 60 seconds
lastgreeting = 0            # Timestamp (epoch seconds) of the last spoken greeting
greetingdict = dict() # An empty dictionary used to hold all the people we've seen in the last minute
#
# The directory to the image database
#
pathdir='database/'
filterfile = 'filter.png'
#
# Face filter used to mask edges of face pictures
#
facefilter = cv2.imread(filterfile, cv2.IMREAD_GRAYSCALE)
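#
# Hypothetical fallback (assumption: filter.png may be missing): cv2.imread
# returns None in that case, so build an equivalent oval mask with OpenCV
# instead of failing later.
#
if facefilter is None:
    facefilter = np.zeros((256, 256), dtype=np.uint8)
    cv2.ellipse(facefilter, (128, 128), (100, 126), 0, 0, 360, 255, -1)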
#
# The Haar cascades used to identify bulk faces, then facial features like eyes, noses and mouths.
#
haarcascade='haarcascade_frontalface_default.xml'
eyehaarcascade='haarcascade_eye.xml'
nosehaarcascade='haarcascade_nose.xml'
mouthhaarcascade='haarcascade_mouth.xml'
#
# Miscellenous counters
#
currentwindow = 0
currentmatch = 0
#--------------------------------------------------------------------------------------------
# HELPER FUNCTIONS
#--------------------------------------------------------------------------------------------
#
# Greet people that we recognize
#
def greet(person):
global lastgreeting, greetingdict
current_time = time.time()
#
# We don't want to overwhelm the speech synthesizer, so we limit
# to saying a specific person no more than once per minute
#
# We also don't want to say anything more than once every ten seconds or so
#
# See if we've already announced this person
#
if person in greetingdict:
last_time = greetingdict[person]
if current_time < (last_time + GREETINGREPEAT):
#
            # We spoke this name too recently, just skip it this time
#
return
else:
#
# We've seen this person before, but it was a while ago, so we can reannounce
greetingdict[person] = current_time
else:
#
# Newly recognized person, add them to dictionary
#
greetingdict[person] = current_time
if current_time > (lastgreeting + GREETINGTHRESHOLD):
#
# We haven't spoken recently, go ahead and give it a shot
#
if (voice_on):
engine.say(GREETINGS[random.randrange(len(GREETINGS))] + ", " + person )
engine.runAndWait()
lastgreeting = current_time
#
# Read in the database of known faces
#
def read_images(path, sz=(256,256)):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
        sz: A tuple with the size to which the images are resized
Returns:
A list [X,y, foldernames]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
foldernames: The list of all names in the database
"""
c = 0
X,y,Z = [], [], []
folder_names = [] # This will be the list of all known names in the database
#
# Files are in separate directories. The directory holds the "name", each
# of the images in the file are the samples for that name
#
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
folder_names.append(subdirname)
subject_path = os.path.join(dirname, subdirname)
number_for_this_subject = len(os.listdir(subject_path))
last_number = None
count = 1.0
saved = 0
for filename in os.listdir(subject_path):
try:
#
# Limit the number of images per person to no more than 10
# If there are more than 10, just take a sample of the 10
#
if int(count*current_max_subjects/number_for_this_subject) != last_number:
#
# Get the image file
#
im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)
#
# For some reason, windows sticks an indexing file into each directory
#
if filename != "Thumbs.db":
# resize to given size (if given)
if (sz is not None):
im = cv2.resize(im, sz)
im_mask = im & facefilter
X.append(np.asarray(im, dtype=np.uint8))
Z.append(np.asarray(im_mask, dtype=np.uint8))
y.append(c)
saved += 1
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
#
# Ignore unreadable files
#
print "Unknown file error:", sys.exc_info()[0], im, sz
pass
last_number = int(count*current_max_subjects/number_for_this_subject)
count += 1
if debug_on:
print saved, "images imported for subject[", c, "]: ", subdirname
c += 1
return [X,y,Z, folder_names]
#
# Within an image, find the eyes using a special Haar cascade
# If we find two eyes, we can draw squares around them in the image
# We need three things passed in:
# The original color image that we can mark up
# The pristine grayscale image that we use for recognition
# The offset within the grayscale image where we already found the face [x, y, height, width]
#
def find_eyes(color_image, gray_image, face_location):
x,y,h,w = face_location
#
# Only look in the box where we found the face AND
# Only look in the top 2/3 of that box for the eyes
#
    gray_face = gray_image[y:int(y+h*.66), x:x+w]
eyes = eye_cascade.detectMultiScale(gray_face)
#
# eyes[] is going to be a list of lists, with each one containing the x,y,h,w information for
# each of the eyes
#
# Only mark the eye boxes if we find EXACTLY two eyes
# AND the eyes are not overlapping. This takes care of most false positives and detection of the same eye more than once
#
if len(eyes) == 2:
ex0, ey0, ew0, eh0 = eyes[0] # Coordinates for the First eye
ex1, ey1, ew1, eh1 = eyes[1] # Coordinates for the Second eye
#
# If eyes came out in reversed order, make sure the left one is listed first
#
if ex0 > ex1:
eyes = [eyes[1], eyes[0]]
        if max(0, min(ex0+ew0, ex1+ew1) - max(ex0, ex1)) == 0:
#
# Eyes don't overlap, so draw the boxes
#
eyecount=0
for (ex,ey,ew,eh) in eyes:
if details_on:
cv2.rectangle(color_image,(x+ex,y+ey),(x+ex+ew,y+ey+eh),(0,255,0),1)
#
# Now, we need to adjust so we return the ABSOLUTE position of the eye, not the relative position
#
                eyes[eyecount] = [x+ex, y+ey, ew, eh]
eyecount += 1
return eyes
#
# Either we found two valid eyes, or we return nothing
#
return []
def find_nose(color_image, gray_image, face_location):
x,y,h,w = face_location
#
# Look only in middle 1/3 of frame for the nose
#
    gray_face = gray_image[int(y+h*.33):int(y+h*.66), x:x+w]
nose = nose_cascade.detectMultiScale(gray_face)
#
# Only print nose box if we find EXACTLY one nose
#
if len(nose) == 1 and details_on:
for (ex,ey,ew,eh) in nose:
cv2.rectangle(color_image,(x+ex,y+int(ey+h*.33)),(x+ex+ew,int(y+ey+eh+h*.33)),(0,255,255),1)
return nose
def find_mouth(color_image, gray_image, face_location):
x,y,h,w = face_location
#
# Right now, look in the bottom third of the frame for the mouth
# The mouth will be horizontally in the middle 60% of the frame and vertically in the lower 1/3 of the frame
#
    gray_face = gray_image[int(y+h*.66):y+h, int(x+w*.20):int(x+w*.80)]
mouth = mouth_cascade.detectMultiScale(gray_face)
#
# Only print mouth box if we find EXACTLY one mouth
#
if len(mouth) == 1:
for (ex,ey,ew,eh) in mouth:
if details_on:
cv2.rectangle(color_image,(int(x+ex+w*.20),int(y+ey+h*.66)),(int(x+ex+ew+w*.20),int(y+ey+eh+h*.66)),(255,255,0),1)
return [[int(x+ex+w*.20),int(y+ey+h*.66),ew,eh]]
#
# If we found none or more than one mouth, just return an empty list
#
return []
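#
# Standalone sketch (illustrative; the main loop below inlines the same math):
# normalize a detected face by warping the eye centers to (30%, 45%) and
# (70%, 45%) and the mouth center to (50%, 85%) of the frame. `eyes` and
# `mouth` are the absolute [x, y, w, h] boxes returned by find_eyes() and
# find_mouth().
#
def normalize_face(gray_image, eyes, mouth, size=(256, 256)):
    rows, cols = gray_image.shape[:2]
    pts1 = np.float32([
        [eyes[0][0] + eyes[0][2]/2, eyes[0][1] + eyes[0][3]/2],       # left eye center
        [eyes[1][0] + eyes[1][2]/2, eyes[1][1] + eyes[1][3]/2],       # right eye center
        [mouth[0][0] + mouth[0][2]/2, mouth[0][1] + mouth[0][3]/2]])  # mouth center
    pts2 = np.float32([[cols*.3, rows*.45], [cols*.7, rows*.45], [cols*.5, rows*.85]])
    M = cv2.getAffineTransform(pts1, pts2)
    return cv2.resize(cv2.warpAffine(gray_image, M, (cols, rows)), size)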
# --------------------------------------------------------------------------------------------------------
# THE MAIN PART OF THE PROGRAM IS BELOW
# --------------------------------------------------------------------------------------------------------
# Initialize the text to speech engine
#
initial_time = time.time()
print "Loading speech engine: "
engine = pyttsx.init()
engine.setProperty('rate', 175)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[-1].id)
print "Load completed in {0:.2f} seconds.\n".format(time.time() - initial_time)
#
# Grab the webcam for our use
#
initial_time = time.time()
print "Appropriating webcam for video capture: "
vc=cv2.VideoCapture(0)
print "Appropriation completed in {0:.2f} seconds.\n".format(time.time() - initial_time)
#
# Set up the Haar cascade to detect (not recognize) the faces
#
#
# We're going to use the Fisherfaces face recognition module
#
initial_time = time.time()
print "Initializing Haar cascades for face, eyes, nose and mouth detection: "
#
# This was prior to using the TanTriggsPreprocessing, we can go back
#model = PredictableModel(Fisherfaces(), NearestNeighbor())
feature = ChainOperator(TanTriggsPreprocessing(), Fisherfaces())
classifier = NearestNeighbor()
model = PredictableModel(feature, classifier)
face_cascade = cv2.CascadeClassifier(haarcascade)
eye_cascade = cv2.CascadeClassifier(eyehaarcascade)
nose_cascade = cv2.CascadeClassifier(nosehaarcascade)
mouth_cascade = cv2.CascadeClassifier(mouthhaarcascade)
print "Initialization completed in {0:.2f} seconds.\n".format(time.time() - initial_time)
#
# Main loop
# Press "l" to learn a new image
# Press "r" to reload image database
# Press "v" to toggle voice synthesis
# Press "b" for best guess of image
# Press "e" to toggle eye detection
# Press "p" to preprocess pictures using TanTriggs
# Press <up/down arrow key> to increase/decrease detection threshold
# Press <left/right arrow key> to increase/decrease number of images to use per subject
# Press <esc> to quit
#
# Initialize and move windows
#
cv2.namedWindow('Identification Window')
cv2.moveWindow('Identification Window', 0,0)
#
# Here is where we'll place faces that we're seeing or learning (assumed to be 50x50)
# These are lined up along the bottom of the main picture window
#
cv2.namedWindow('Face - 0')
cv2.moveWindow('Face - 0', 0,500)
cv2.namedWindow('Face - 1')
cv2.moveWindow('Face - 1', 100,500)
cv2.namedWindow('Face - 2')
cv2.moveWindow('Face - 2', 200,500)
cv2.namedWindow('Face - 3')
cv2.moveWindow('Face - 3', 300,500)
cv2.namedWindow('Face - 4')
cv2.moveWindow('Face - 4', 400,500)
cv2.namedWindow('Face - 5')
cv2.moveWindow('Face - 5', 500,500)
#
# This is where we'll place faces that we've positively matched
# These are lined along the right side of the main picture window
#
cv2.namedWindow('Match - 0')
cv2.moveWindow('Match - 0', 640,0)
cv2.namedWindow('Match - 1')
cv2.moveWindow('Match - 1', 640,130)
cv2.namedWindow('Match - 2')
cv2.moveWindow('Match - 2', 640,260)
cv2.namedWindow('Match - 3')
cv2.moveWindow('Match - 3', 640,390)
cv2.namedWindow('Database - 0')
cv2.moveWindow('Database - 0', 740,0)
cv2.namedWindow('Database - 1')
cv2.moveWindow('Database - 1', 740,130)
cv2.namedWindow('Database - 2')
cv2.moveWindow('Database - 2', 740,260)
cv2.namedWindow('Database - 3')
cv2.moveWindow('Database - 3', 740,390)
lastpredicted_label = None
#
# The current_state changes, based on the current situation. Most of these are triggered by keypresses at the bottom
# State = "Loading" --> This means we need to reload the image database and reload the model
# = "Tracking" --> This is the normal operation where we're just looking for faces
# = "Learning" --> This is the mode where are capturing images and saving them to the database
#
current_state = "Loading"
#
# X is an empty list. If it's blank after we come back from reading images, then we know we can't identify anything
# until we record some images. A blank "X" triggers skipping most of the stuff except showing the original webcam image.
#
X = []
#
# The main loop: The only way out of this is to press the "escape" key to exit the program.
#
while (1):
#
# If we need to load the images and generate the model, do it here
#
if (current_state == "Loading"):
initial_time = time.time()
print "Importing image database for: "
[X,y,Z,subject_names] = read_images(pathdir)
subject_list = list(y)
print subject_names
print "Import complete in {0:.2f} seconds.\n".format(time.time() - initial_time)
#
# If X is null, we didn't find any pictures. If this is the case, don't bother trying to load
# any model (or do face recognition later)
#
if X != []:
#
# The next two lines just create a dictionary of the names, as follows:
# [ 1, first_name in database]
# [ 2, second_name in database]
# ...
# [ n, last_name in database]
#
# This dictionary is used in for the greeting and labeling
#
list_of_labels = list(xrange(max(y)+1))
subject_dictionary = dict(zip(list_of_labels, subject_names))
#
# This constructs the linear distriminant analysis matrix, which is used for facial identification
#
initial_time = time.time()
print "Constructing linear discriminant analysis matrix for facial identification: "
model.compute(Z,y)
print "Construction completed in {0:.2f} seconds.\n".format(time.time() - initial_time)
current_state = "Tracking"
#
# Get a new frame from the webcam
#
rval, frame = vc.read()
#
# Copy the frame adn convert the whole thing to black and white to make recognition easier
#
img = frame
rows,cols,ch = frame.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# Use the Haar Cascade to see if there are any faces in the picture
# This is the bulk face detector, but it doesn't do any recognition of individuals at this point.
#
faces = face_cascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(50, 50),flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
#
# For each face,
# 1. put a rectangle around it
# 2. A. If we're in pre-learning mode, wait until we get a name
# B. If we're in learning mode, save face and wait 1 second before starting again
# C. If we're in identification mode, try to identify it
#
for (x,y,w,h) in faces:
#
# Draw a blue box around the face that we've found
#
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
#
# Now, see if we can find the eyes, nose and mouth
#
detected_eyes = find_eyes(img, gray, [x,y,h,w])
detected_noses = find_nose(img, gray, [x,y,h,w])
detected_mouths = find_mouth(img, gray, [x,y,h,w])
#
# Resize the face to the same size as the database of faces
#
if len(detected_eyes) != 2 or len(detected_mouths) != 1:
#
# We didn't get eyes or mouth for this picture, do a simple comparison
#
sampleImage = gray[y:y+h, x:x+w]
#sampleImage = cv2.resize(sampleImage, (256,256)) & facefilter <<<<----- Now filtering on input, not here
sampleImage = cv2.resize(sampleImage, (256,256))
#
# If we're tracking eyes, since we never detected eyes or a mouth.... too much chance of a bad match
# If we're not tracking details, go ahead and take a guess
#
if details_on:
continue
else:
#
# Normalize picture by resizing and retilting, based on location of eyes and mouth
# We're using a standard face model where the center of the left eye is at x,y = (30%, 45%)
# the center of the right eye is at x,y = (70%, 45%) and the center of the mouth is x,y (50%, 85%)
# This should realign any tilted or skewed faces into a simple normalized form.
#
centerleftx = detected_eyes[0][0]+detected_eyes[0][2]/2 # left side plus half width
centerlefty = detected_eyes[0][1]+detected_eyes[0][3]/2 # top plus half height
centerrightx = detected_eyes[1][0]+detected_eyes[1][2]/2 # left side plus half width
centerrighty = detected_eyes[1][1]+detected_eyes[1][3]/2 # top plus half height
centermouthx = detected_mouths[0][0]+detected_mouths[0][2]/2 # left size plus half width
centermouthy = detected_mouths[0][1]+detected_mouths[0][3]/2 # top plus half height
#
# Warp picture to realign eyes and mouth where we want them to be
# Eyes are at 30% from the left and right edges, 45% down from top of picture
# Mouth is centered in middle, 85% of the way down the page
#
pts1 = np.float32([[centerleftx,centerlefty],[centerrightx,centerrighty],[centermouthx,centermouthy]])
pts2 = np.float32([[cols*.3,rows*.45],[cols*.7,rows*.45],[cols*.50,rows*.85]])
#
# Affine tranformations take three points in the original picture and three points in the new picture.
# Based on those three points, all other points can be mapped from the old picture to the new picture.
# By choosing the center of the eyes and the middle of the mouth, this will have the effect of normalizing
# the picture by leveling out the eyes and putting the center of the mouth back into the center of the picture.
#
M = cv2.getAffineTransform(pts1,pts2)
warped_image = cv2.warpAffine(gray,M,(cols,rows))
#
# Now, all we have to do is resize the warped image into one we want to save
# First, we mask the borders to try to eliminate lighting-effects that aren't on the face itself
# This is just a black oval mask around the outside corners
#
# sampleImage = cv2.resize(warped_image, (256, 256)) & facefilter <<<----- Decided to filter on input
sampleImage = cv2.resize(warped_image, (256, 256))
#
# The display image is smaller (100x100) than the original picture
# These are displayed in consecutive windows (from #0 to #5) according to the algorithm below
display = cv2.resize(sampleImage, (100,100))
cv2.imshow('Face - '+str(currentwindow), display)
currentwindow = (currentwindow+1) % 6
#
# If we're in a learning mode, capture the picture
# pictures are kept in a directory structure indexed by name.
# All of the pictures are timestamped to make them unique and saved under the subject's directory
# This means if you reuse a name, it will just dump all the pictures into that directory.
#
if current_state == "Learning":
print pathdir+name+'/'+str(started_learning+current_saved)+'.jpg'
cv2.imwrite( pathdir+name+'/'+str(started_learning+current_saved)+'.jpg', sampleImage);
current_saved += 1
time.sleep(1)
#
# Keep learning for 15 seconds or until we capture 6 images
#
if current_state == "Learning":
if ((time.time() - started_learning) > 15) or (current_saved == 6):
#
# Stop learning if we've been learning for 15 seconds or saved 6 pictures
#
current_state = "Tracking"
else:
cv2.putText(frame,'Recorded '+str(current_saved)+' images for '+name, (x,y-3), cv2.FONT_HERSHEY_PLAIN,1,(0,0,250),1,1)
#
# If we're learning, skip back to the top of the loop
#
continue
#
# If we don't have anything in the database, skip the recognition part
#
if X == []:
break;
#
# Do we recognize the current face?
# The "predict" method will return the closest match of the current image to the database
#
finalimage = sampleImage & facefilter
[ predicted_label, generic_classifier_output] = model.predict(finalimage)
#
# Determine if the prediction is within a certain "threshold". This is actually the
# "distance" between the image and the database. The closer the distance is to "0", the
# closer a match it really is.
#
# Higher thresholds result in less accuracy or more mis-identified pictures.
#
if int(generic_classifier_output['distances'][0]) > current_threshold * 4:
high=current_threshold * 4
else:
high=int(generic_classifier_output['distances'][0])
#
# The percentage is calculated to tell us how close we are to a perfect match we have to the current image
# This is an ARBITRARY calculation. We could have done it anyway we wanted, but this seemed to work nicely
#
percentage = int((((current_threshold*4.0)-high)/(current_threshold*4.0))*100)
if debug_on:
print "Prediction:", subject_dictionary[predicted_label], str(percentage)+"%", generic_classifier_output['distances'][0]
#
# The percentage is high enough to call it a "match"
#
if percentage >= 80:
cv2.putText(img,str(subject_dictionary[predicted_label])+"["+str(percentage)+"%]", (x,y-3), cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(0,255,0),0,1)
#
# We definitively matched someone.
# Give them a little vocalized greeting :-)
#
greet(subject_dictionary[predicted_label])
#
# If we have a new match, the picture alongside the first match in the database
#
if predicted_label != lastpredicted_label:
display = cv2.resize(sampleImage, (100,100))
cv2.imshow('Match - '+str(currentmatch), display)
display = cv2.resize(X[subject_list.index(predicted_label)], (100,100))
cv2.imshow('Database - '+str(currentmatch), display)
currentmatch = (currentmatch+1) % 4
lastpredicted_label = predicted_label
#
# The percentage is not high enough for a match, but close enough for a "maybe"
#
elif percentage >= 70:
cv2.putText(img,str(subject_dictionary[predicted_label])+"["+str(percentage)+"%]", (x,y-3), cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,0,0),0,1)
#
# The percentage is not high enough for a maybe, but close enough for a "it kinda looks like him/her"
#
elif percentage > 60:
cv2.putText(img,str(subject_dictionary[predicted_label])+"["+str(percentage)+"%]", (x,y-3), cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(0,0,255),0,1)
#
# The percentage is not high enough for anything, but we've been asked to 'take our best guess'
#
elif bestguess_on:
cv2.putText(img,str(subject_dictionary[predicted_label])+"["+str(percentage)+"%]", (x,y-3), cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(0,0,255),0,1)
#
# Print the control information
# Note that the 'd' command (for debugging) doesn't appear on the screen
#
cv2.putText(img, "Identification Threshold: ", (390,20), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
cv2.putText(img, str(current_threshold), (600,20), cv2.FONT_HERSHEY_PLAIN,1,(0,255,0),0,1)
cv2.putText(img, "Speech Synthesis (v): ", (413,40), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
cv2.putText(img, onoff[voice_on], (600,40), cv2.FONT_HERSHEY_PLAIN,1,(0, 255,0),0,1)
cv2.putText(img, "Eye Tracking (e): ", (450,60), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
cv2.putText(img, onoff[details_on], (600,60), cv2.FONT_HERSHEY_PLAIN,1,(0,255,0),0,1)
cv2.putText(img, "Best Guess (b): ", (465,80), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
cv2.putText(img, onoff[bestguess_on], (600,80), cv2.FONT_HERSHEY_PLAIN,1,(0,255,0),0,1)
cv2.putText(img, "Preprocess (p): ", (465,100), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
cv2.putText(img, onoff[preprocess_on], (600,100), cv2.FONT_HERSHEY_PLAIN,1,(0,255,0),0,1)
#
# Legend at the bottom
#
cv2.putText(img, "Learn (l): Recompute LDA matrix (r): Quit (<ESC>) Subjects("+str(current_max_subjects)+")", (10,470), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
#
# Print list of recently seen people
#
cv2.putText(img, "Recently seen: ", (10,20), cv2.FONT_HERSHEY_PLAIN,1,(255,0,0),0,1)
current_time = time.time()
count = 0
for person in greetingdict:
if current_time - greetingdict[person] > GREETINGREPEAT:
#
# Person has been in database too long, remove them
#
del greetingdict[person]
#
            # Since we changed the dictionary we're iterating on, stop now
            # (mutating a dict during iteration is a big Python no-no).
# We'll remove older entries the next time around
#
break
else:
count += 1
cv2.putText(img, person, (10,20+count*20), cv2.FONT_HERSHEY_PLAIN,1,(0,255,0),0,1)
#
# Finally, after all of this, go ahead and show the image in the window
#
cv2.imshow('Identification Window',img)
#
# Now, wait for a key (but only for 10 ms). If we don't get a key, just start the loop over
# If we get a key, act on that key
#
key = cv2.waitKey(10)
#print "Key value:", key
if key == 27: # the <ESC> key
#
# End the program
break
elif key == ord('l'):
#
# Go get the new images, but don't load them yet since we might be learning more people
#
current_state = "Learning"
current_saved = 0
name = raw_input("Subject's name?: ")
if not os.path.exists(pathdir+name):
os.makedirs(pathdir+name)
started_learning = time.time()
elif key == ord('r'):
#
# Tell the loop to re-load the database with the new images
#
current_state = "Loading"
elif key == 2490368: # Up arrow
current_threshold += 10
elif key == 2621440: # Down arrow
current_threshold -= 10
if current_threshold < 10:
current_threshold = 10;
elif key == 2424832: # Left arrow
current_max_subjects -= 1
if current_max_subjects < 1:
current_max_subjects = 1
elif key == 2555904: # Right arrow
current_max_subjects += 1
elif key == ord('e'):
details_on = not details_on
elif key == ord('v'):
voice_on = not voice_on
elif key == ord('b'):
bestguess_on = not bestguess_on
elif key == ord('d'):
debug_on = not debug_on
elif key == ord('p'):
preprocess_on = not preprocess_on
if not preprocess_on:
# This doesn't use any preprocessing of images TanTriggsPreprocessing
current_threshold = IDENTIFIED_THRESHOLD
model = PredictableModel(Fisherfaces(), NearestNeighbor())
else:
# This uses TanTriggsPreprocessing to account for lighting differences
# This is "on" by default
# This is likely to get better guesses, so we narrow down the threshold
# to prevent mis-identifying
current_threshold = IDENTIFIED_THRESHOLD/TANTRIGGS_SCALER
feature = ChainOperator(TanTriggsPreprocessing(), Fisherfaces())
model = PredictableModel(feature, NearestNeighbor())
current_state = "Loading"
cv2.destroyAllWindows()
vc.release()
| apache-2.0 | 2,895,189,796,406,926,300 | 40.957569 | 172 | 0.60486 | false |
muzixing/ryu | ryu/gui/controller.py | 5 | 2813 | #!/usr/bin/env python
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import sys
import logging
import inspect
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from flask import Flask, request, abort
from views.view_base import ViewBase
parser = ArgumentParser()
parser.add_argument('--host', dest='host', default='0.0.0.0')
parser.add_argument('--port', dest='port', type=int, default=8000)
args = parser.parse_args()
app = Flask(__name__.split('.')[0])
logging.basicConfig(level=logging.DEBUG,
stream=sys.stderr,
format="%(asctime)-15s [%(levelname)-4s] %(message)s")
#handler = logging.FileHandler("/tmp/ryu_gui.log", encoding="utf8")
#app.logger.addHandler(handler)
@app.before_request
def before_request_trigger():
pass
@app.after_request
def after_request_trigger(response):
return response
@app.route('/')
def index():
return _view('topology')
@app.route('/stats/flow', methods=['POST'])
def flow_mod():
return _view('flow', request.form.get('host'), request.form.get('port'),
request.form.get('dpid'), request.form.get('flows'))
@app.route('/websocket')
def websocket():
if request.environ.get('wsgi.websocket'):
ws = request.environ['wsgi.websocket']
return _view('websocket', ws)
abort(404)
def _view(view_name, *args, **kwargs):
view_name = 'views.' + view_name
try:
__import__(view_name)
except ImportError:
app.logger.error('ImportError (%s)', view_name)
abort(500)
mod = sys.modules.get(view_name)
classes = inspect.getmembers(mod, lambda cls: (inspect.isclass(cls) and
issubclass(cls, ViewBase)))
try:
view = classes[0][1](*args, **kwargs)
except IndexError:
app.logger.error('no View class found (%s)', view_name)
abort(500)
app.logger.debug('view loaded. %s', view_name)
return view.run()
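# Note: _view() imports 'views.<name>' dynamically, picks the first ViewBase
# subclass defined in that module, instantiates it with the given arguments
# and returns the result of its run() method.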
if __name__ == '__main__':
server = pywsgi.WSGIServer((args.host, args.port),
app, handler_class=WebSocketHandler)
app.logger.info('Running on %s', server.address)
server.serve_forever()
| apache-2.0 | 4,218,499,066,368,941,000 | 29.247312 | 77 | 0.659438 | false |
rui7157/Daily-code | proxyip/proxyip.py | 1 | 2079 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-05-17 17:29:31
# @Author : NvRay ([email protected])
import requests
import re
import os
from gevent import monkey
monkey.patch_socket()
import gevent
address = {
"国内高匿": "nn",
"国内高透": "nt",
"国外高匿": "wn",
"国外高透": "wt"
}
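# Rough English meanings of the xicidaili.com categories above (translation
# is approximate): "国内高匿" = domestic high-anonymity, "国内高透" = domestic
# transparent, "国外高匿" = foreign high-anonymity, "国外高透" = foreign
# transparent.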
save_file = os.path.join(os.path.dirname(__file__), "proxyip.txt")
session = requests.session()
class Proxy(object):
def __init__(self, page, addr):
self.page = page
self.data = list()
self.addr = address.get(addr)
def web(self, page):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
"Host": "www.xicidaili.com",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate"
}
if page == 0:
web = requests.get("http://www.xicidaili.com/nt",
headers=headers,proxies={"HTTP":"101.21.100.106:8888"}).text
else:
web = requests.get("http://www.xicidaili.com/{addr}/{page}".format(
page=page, addr=self.addr), headers=headers).text
data = re.findall(
r'<tr class="odd">[\s\S]*?<td>(.*?)</td>[\s\S]*?<td>(.*?)</td>[\s\S]*?">(.*?)</a>', web)
for info in data:
self.data.append({
"ip": info[0],
"port": info[1],
"addr": info[2]
})
def run(self):
gevent.joinall([gevent.spawn(self.web, p) for p in range(self.page)])
return self.data
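# run() spawns one greenlet per page and joins them all; since gevent
# greenlets are cooperatively scheduled, appending to self.data from
# several greenlets is safe here.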
if __name__ == '__main__':
ip = Proxy(300, "国内高匿").run()
ipdata=dict()
for i in ip:
with open(save_file, "a") as f:
f.write(u"{ip} {port} {addr}\n".format(ip=i.get("ip"),
port=i.get("port"), addr=i.get("addr")).encode("utf-8"))
print u"ip:%s port:%s addr:%s" % (i.get("ip"), i.get("port"), i.get("addr"))
| gpl-3.0 | 8,107,028,449,022,860,000 | 30.369231 | 107 | 0.503188 | false |
h4ck3rm1k3/gobject-introspection | giscanner/maintransformer.py | 1 | 55890 | # -*- Mode: Python -*-
# Copyright (C) 2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import re
from . import ast
from . import message
from .annotationparser import (TAG_VFUNC, TAG_SINCE, TAG_DEPRECATED, TAG_RETURNS,
TAG_ATTRIBUTES, TAG_RENAME_TO, TAG_TYPE,
TAG_UNREF_FUNC, TAG_REF_FUNC, TAG_SET_VALUE_FUNC,
TAG_GET_VALUE_FUNC, TAG_VALUE)
from .annotationparser import (OPT_ALLOW_NONE, OPT_ARRAY, OPT_ATTRIBUTE,
OPT_ELEMENT_TYPE, OPT_IN, OPT_INOUT,
OPT_INOUT_ALT, OPT_OUT, OPT_SCOPE,
OPT_OUT_CALLER_ALLOCATES, OPT_OUT_CALLEE_ALLOCATES,
OPT_TYPE, OPT_CLOSURE, OPT_DESTROY, OPT_TRANSFER, OPT_SKIP,
OPT_FOREIGN, OPT_ARRAY_FIXED_SIZE,
OPT_ARRAY_LENGTH, OPT_ARRAY_ZERO_TERMINATED,
OPT_CONSTRUCTOR, OPT_METHOD,
OPT_TRANSFER_NONE, OPT_TRANSFER_FLOATING)
from .annotationparser import AnnotationParser
from .transformer import TransformerException
from .utils import to_underscores, to_underscores_noprefix
class MainTransformer(object):
def __init__(self, transformer, blocks):
self._transformer = transformer
self._blocks = blocks
self._namespace = transformer.namespace
self._uscore_type_names = {}
# Public API
def transform(self):
contents = list(self._namespace.itervalues())
if len(contents) == 0:
message.fatal("""Namespace is empty; likely causes are:
* Not including .h files to be scanned
* Broken --identifier-prefix
""")
# Some initial namespace surgery
self._namespace.walk(self._pass_fixup_hidden_fields)
# We have a rough tree which should have most of the types
# we know about. Let's attempt closure; walk over all of the
# Type() types and see if they match up with something.
self._namespace.walk(self._pass_type_resolution)
# Read in annotations needed early
self._namespace.walk(self._pass_read_annotations_early)
# Determine some default values for transfer etc.
# based on the current tree.
self._namespace.walk(self._pass_callable_defaults)
# Read in most annotations now.
self._namespace.walk(self._pass_read_annotations)
# Now that we've possibly seen more types from annotations,
# do another type resolution pass.
self._namespace.walk(self._pass_type_resolution)
# Generate a reverse mapping "bar_baz" -> BarBaz
for node in self._namespace.itervalues():
if isinstance(node, ast.Registered) and node.get_type is not None:
self._uscore_type_names[node.c_symbol_prefix] = node
elif isinstance(node, (ast.Record, ast.Union)):
uscored = to_underscores_noprefix(node.name).lower()
self._uscore_type_names[uscored] = node
for node in list(self._namespace.itervalues()):
if isinstance(node, ast.Function):
# Discover which toplevel functions are actually methods
self._pair_function(node)
if isinstance(node, (ast.Class, ast.Interface)):
self._pair_class_virtuals(node)
# Some annotations need to be post function pairing
self._namespace.walk(self._pass_read_annotations2)
# Another type resolution pass after we've parsed virtuals, etc.
self._namespace.walk(self._pass_type_resolution)
self._namespace.walk(self._pass3)
# TODO - merge into pass3
self._pair_quarks_with_enums()
# Private
def _pass_fixup_hidden_fields(self, node, chain):
"""Hide all callbacks starting with _; the typical
usage is void (*_gtk_reserved1)(void);"""
if not isinstance(node, (ast.Class, ast.Interface,
ast.Record, ast.Union)):
return True
for field in node.fields:
if field is None:
continue
if (field.name.startswith('_')
and field.anonymous_node is not None
and isinstance(field.anonymous_node, ast.Callback)):
field.introspectable = False
return True
def _get_validate_parameter_name(self, parent, param_name, origin):
try:
param = parent.get_parameter(param_name)
except ValueError, e:
param = None
if param is None:
if isinstance(origin, ast.Parameter):
origin_name = 'parameter %s' % (origin.argname, )
else:
origin_name = 'return value'
message.log_node(
message.FATAL, parent,
"can't find parameter %s referenced by %s of %r"
% (param_name, origin_name, parent.name))
return param.argname
def _apply_annotation_rename_to(self, node, chain, block):
if not block:
return
rename_to = block.get(TAG_RENAME_TO)
if not rename_to:
return
rename_to = rename_to.value
target = self._namespace.get_by_symbol(rename_to)
if not target:
message.warn_node(node,
"Can't find symbol %r referenced by Rename annotation" % (
rename_to, ))
elif target.shadowed_by:
message.warn_node(node,
"Function %r already shadowed by %r, can't overwrite with %r" % (
target.symbol,
target.shadowed_by,
rename_to))
elif target.shadows:
message.warn_node(node,
"Function %r already shadows %r, can't multiply shadow with %r" % (
target.symbol,
target.shadows,
rename_to))
else:
target.shadowed_by = node.name
node.shadows = target.name
def _apply_annotations_function(self, node, chain):
block = self._blocks.get(node.symbol)
self._apply_annotations_callable(node, chain, block)
def _pass_read_annotations_early(self, node, chain):
if isinstance(node, ast.Record):
if node.ctype is not None:
block = self._blocks.get(node.ctype)
else:
block = self._blocks.get(node.c_name)
self._apply_annotations_annotated(node, block)
return True
def _pass_callable_defaults(self, node, chain):
if isinstance(node, (ast.Callable, ast.Signal)):
for param in node.parameters:
if param.transfer is None:
param.transfer = self._get_transfer_default(node, param)
if node.retval.transfer is None:
node.retval.transfer = self._get_transfer_default(node, node.retval)
return True
def _get_annotation_name(self, node):
if isinstance(node, (ast.Class, ast.Interface, ast.Record,
ast.Union, ast.Enum, ast.Bitfield,
ast.Callback, ast.Alias)):
if node.ctype is not None:
return node.ctype
elif isinstance(node, ast.Registered) and node.gtype_name is not None:
return node.gtype_name
return node.c_name
raise AssertionError("Unhandled node %r" % (node, ))
def _get_block(self, node):
return self._blocks.get(self._get_annotation_name(node))
def _pass_read_annotations(self, node, chain):
if not node.namespace:
return False
if isinstance(node, ast.Alias):
self._apply_annotations_alias(node, chain)
if isinstance(node, ast.Function):
self._apply_annotations_function(node, chain)
if isinstance(node, ast.Callback):
self._apply_annotations_callable(node, chain, block = self._get_block(node))
if isinstance(node, (ast.Class, ast.Interface, ast.Union, ast.Enum,
ast.Bitfield, ast.Callback)):
self._apply_annotations_annotated(node, self._get_block(node))
if isinstance(node, (ast.Class, ast.Interface, ast.Record, ast.Union)):
block = self._get_block(node)
for field in node.fields:
self._apply_annotations_field(node, block, field)
name = self._get_annotation_name(node)
section_name = 'SECTION:' + name.lower()
block = self._blocks.get(section_name)
if block:
node.doc = block.comment
if isinstance(node, (ast.Class, ast.Interface)):
for prop in node.properties:
self._apply_annotations_property(node, prop)
for sig in node.signals:
self._apply_annotations_signal(node, sig)
if isinstance(node, ast.Class):
block = self._get_block(node)
if block:
tag = block.get(TAG_UNREF_FUNC)
node.unref_func = tag.value if tag else None
tag = block.get(TAG_REF_FUNC)
node.ref_func = tag.value if tag else None
tag = block.get(TAG_SET_VALUE_FUNC)
node.set_value_func = tag.value if tag else None
tag = block.get(TAG_GET_VALUE_FUNC)
node.get_value_func = tag.value if tag else None
if isinstance(node, ast.Constant):
self._apply_annotations_constant(node)
return True
def _adjust_container_type(self, parent, node, options):
has_element_type = OPT_ELEMENT_TYPE in options
has_array = OPT_ARRAY in options
if has_array:
self._apply_annotations_array(parent, node, options)
elif has_element_type:
self._apply_annotations_element_type(parent, node, options)
if isinstance(node.type, ast.Array):
self._check_array_element_type(node.type, options)
def _resolve(self, type_str, type_node=None, node=None, parent=None):
def grab_one(type_str, resolver, top_combiner, combiner):
"""Return a complete type, and the trailing string part after it.
Use resolver() on each identifier, and combiner() on the parts of
each complete type. (top_combiner is used on the top-most type.)"""
bits = re.split(r'([,<>()])', type_str, 1)
first, sep, rest = [bits[0], '', ''] if (len(bits)==1) else bits
args = [resolver(first)]
if sep == '<' or sep == '(':
lastsep = '>' if (sep == '<') else ')'
while sep != lastsep:
next, rest = grab_one(rest, resolver, combiner, combiner)
args.append(next)
sep, rest = rest[0], rest[1:]
else:
rest = sep + rest
return top_combiner(*args), rest
def resolver(ident):
res = self._transformer.create_type_from_user_string(ident)
return res
def combiner(base, *rest):
if not rest:
return base
if isinstance(base, ast.List) and len(rest) == 1:
return ast.List(base.name, *rest)
if isinstance(base, ast.Map) and len(rest) == 2:
return ast.Map(*rest)
message.warn(
"Too many parameters in type specification %r" % (type_str, ))
return base
def top_combiner(base, *rest):
if type_node is not None and isinstance(type_node, ast.Type):
base.is_const = type_node.is_const
return combiner(base, *rest)
result, rest = grab_one(type_str, resolver, top_combiner, combiner)
if rest:
message.warn("Trailing components in type specification %r" % (
type_str, ))
if not result.resolved:
position = None
if parent is not None and isinstance(parent, ast.Function):
text = parent.symbol
position = self._get_position(parent, node)
else:
text = type_str
message.warn_node(parent, "%s: Unknown type: %r" %
(text, result.ctype), positions=position)
return result
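# Illustrative sketch of how _resolve() walks a nested type string, assuming
# the referenced types are known to the transformer:
# 'GLib.HashTable<utf8,GLib.List<Foo>>' is split by grab_one() into
# 'GLib.HashTable' plus the bracketed rest; resolver() handles each
# identifier and combiner() folds the resolved parts back into a single
# ast.Map with the key and value types filled in.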
def _resolve_toplevel(self, type_str, type_node=None, node=None, parent=None):
"""Like _resolve(), but attempt to preserve more attributes of original type."""
result = self._resolve(type_str, type_node=type_node, node=node, parent=parent)
# If we replace a node with a new type (such as an annotated) we
# might lose the ctype from the original node.
if type_node is not None:
result.ctype = type_node.ctype
return result
def _get_position(self, func, param):
block = self._blocks.get(func.symbol)
if block:
if isinstance(param, ast.Parameter):
tag = block.tags.get(param.argname)
elif isinstance(param, ast.Return):
tag = block.tags.get(TAG_RETURNS)
else:
tag = None
if tag is not None and tag.position:
return tag.position
return block.position
def _check_array_element_type(self, array, options):
# GPtrArrays are allowed to contain non basic types
# (except enums and flags) or basic types that are
# as big as a gpointer
if array.array_type == ast.Array.GLIB_PTRARRAY and \
((array.element_type in ast.BASIC_GIR_TYPES
and not array.element_type in ast.POINTER_TYPES) or
isinstance(array.element_type, ast.Enum) or
isinstance(array.element_type, ast.Bitfield)):
message.warn("invalid (element-type) for a GPtrArray, "
"must be a pointer", options.position)
# GByteArrays have (element-type) guint8 by default
if array.array_type == ast.Array.GLIB_BYTEARRAY:
if array.element_type == ast.TYPE_ANY:
array.element_type = ast.TYPE_UINT8
elif not array.element_type in [ast.TYPE_UINT8,
ast.TYPE_INT8,
ast.TYPE_CHAR]:
message.warn("invalid (element-type) for a GByteArray, "
"must be one of guint8, gint8 or gchar",
options.position)
def _apply_annotations_array(self, parent, node, options):
array_opt = options.get(OPT_ARRAY)
if array_opt:
array_values = array_opt.all()
else:
array_values = {}
element_type = options.get(OPT_ELEMENT_TYPE)
if element_type is not None:
element_type_node = self._resolve(element_type.one(),
node.type, node, parent)
elif isinstance(node.type, ast.Array):
element_type_node = node.type.element_type
else:
# We're assuming here that Foo* with an (array) annotation
# and no (element-type) means array of Foo
element_type_node = node.type.clone()
# The element's ctype is the array's dereferenced
if element_type_node.ctype is not None and \
element_type_node.ctype.endswith('*'):
element_type_node.ctype = element_type_node.ctype[:-1]
if isinstance(node.type, ast.Array):
array_type = node.type.array_type
else:
array_type = None
container_type = ast.Array(array_type, element_type_node,
ctype=node.type.ctype,
is_const=node.type.is_const)
if OPT_ARRAY_ZERO_TERMINATED in array_values:
container_type.zeroterminated = array_values.get(
OPT_ARRAY_ZERO_TERMINATED) == '1'
else:
container_type.zeroterminated = False
length = array_values.get(OPT_ARRAY_LENGTH)
if length is not None:
paramname = self._get_validate_parameter_name(parent, length, node)
if paramname:
param = parent.get_parameter(paramname)
param.direction = node.direction
if param.direction == ast.PARAM_DIRECTION_OUT:
param.transfer = ast.PARAM_TRANSFER_FULL
container_type.length_param_name = param.argname
fixed = array_values.get(OPT_ARRAY_FIXED_SIZE)
if fixed:
try:
container_type.size = int(fixed)
except ValueError:
# Already warned in annotationparser.py
return
node.type = container_type
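# For example, an "(array length=len)" annotation on a parameter makes the
# code above look up the 'len' parameter, force its direction to match the
# array argument, and record it as the array's length_param_name.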
def _apply_annotations_element_type(self, parent, node, options):
element_type_opt = options.get(OPT_ELEMENT_TYPE)
if element_type_opt is None:
message.warn(
'element-type annotation takes at least one option, '
'none given',
options.position)
return
if isinstance(node.type, ast.List):
if element_type_opt.length() != 1:
message.warn(
'element-type annotation for a list must have exactly '
'one option, not %d options' % (element_type_opt.length(), ),
options.position)
return
node.type.element_type = self._resolve(element_type_opt.one(),
node.type, node, parent)
elif isinstance(node.type, ast.Map):
if element_type_opt.length() != 2:
message.warn(
'element-type annotation for a hash table must have exactly '
'two options, not %d option(s)' % (element_type_opt.length(), ),
options.position)
return
element_type = element_type_opt.flat()
node.type.key_type = self._resolve(element_type[0],
node.type, node, parent)
node.type.value_type = self._resolve(element_type[1],
node.type, node, parent)
elif isinstance(node.type, ast.Array):
if element_type_opt.length() != 1:
message.warn(
'element-type annotation for an array must have exactly '
'one option, not %d options' % (element_type_opt.length(), ),
options.position)
return
node.type.element_type = self._resolve(element_type_opt.one(),
node.type, node, parent)
else:
message.warn_node(parent,
"Unknown container %r for element-type annotation" % (node.type, ))
def _get_transfer_default_param(self, parent, node):
if node.direction in [ast.PARAM_DIRECTION_INOUT,
ast.PARAM_DIRECTION_OUT]:
if node.caller_allocates:
return ast.PARAM_TRANSFER_NONE
return ast.PARAM_TRANSFER_FULL
return ast.PARAM_TRANSFER_NONE
def _get_transfer_default_returntype_basic(self, typeval):
if (typeval.is_equiv(ast.BASIC_GIR_TYPES)
or typeval.is_const
or typeval.is_equiv(ast.TYPE_NONE)):
return ast.PARAM_TRANSFER_NONE
elif typeval.is_equiv(ast.TYPE_STRING):
# Non-const strings default to FULL
return ast.PARAM_TRANSFER_FULL
elif typeval.target_fundamental:
# This looks like just GType right now
return None
return None
def _is_gi_subclass(self, typeval, supercls_type):
cls = self._transformer.lookup_typenode(typeval)
assert cls, str(typeval)
supercls = self._transformer.lookup_typenode(supercls_type)
assert supercls
if cls is supercls:
return True
if cls.parent and cls.parent.target_giname != 'GObject.Object':
return self._is_gi_subclass(cls.parent, supercls_type)
return False
def _get_transfer_default_return(self, parent, node):
typeval = node.type
basic = self._get_transfer_default_returntype_basic(typeval)
if basic:
return basic
if not typeval.target_giname:
return None
target = self._transformer.lookup_typenode(typeval)
if isinstance(target, ast.Alias):
return self._get_transfer_default_returntype_basic(target.target)
elif (isinstance(target, ast.Boxed)
or (isinstance(target, (ast.Record, ast.Union))
and (target.gtype_name is not None or target.foreign))):
return ast.PARAM_TRANSFER_FULL
elif isinstance(target, (ast.Enum, ast.Bitfield)):
return ast.PARAM_TRANSFER_NONE
# Handle constructors specially here
elif isinstance(parent, ast.Function) and parent.is_constructor:
if isinstance(target, ast.Class):
initially_unowned_type = ast.Type(target_giname='GObject.InitiallyUnowned')
initially_unowned = self._transformer.lookup_typenode(initially_unowned_type)
if initially_unowned and self._is_gi_subclass(typeval, initially_unowned_type):
return ast.PARAM_TRANSFER_NONE
else:
return ast.PARAM_TRANSFER_FULL
elif isinstance(target, (ast.Record, ast.Union)):
return ast.PARAM_TRANSFER_FULL
else:
raise AssertionError("Invalid constructor")
elif isinstance(target, (ast.Class, ast.Record, ast.Union)):
# Explicitly no default for these
return None
else:
return None
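# In short: basic and const types default to transfer none; non-const
# strings, boxed types and foreign records default to full; constructor
# returns default to full unless the class derives from
# GObject.InitiallyUnowned, in which case they default to none.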
def _get_transfer_default(self, parent, node):
if node.type.is_equiv(ast.TYPE_NONE) or isinstance(node.type, ast.Varargs):
return ast.PARAM_TRANSFER_NONE
elif isinstance(node, ast.Parameter):
return self._get_transfer_default_param(parent, node)
elif isinstance(node, ast.Return):
return self._get_transfer_default_return(parent, node)
elif isinstance(node, ast.Field):
return ast.PARAM_TRANSFER_NONE
elif isinstance(node, ast.Property):
return ast.PARAM_TRANSFER_NONE
else:
raise AssertionError(node)
def _apply_annotations_param_ret_common(self, parent, node, tag):
options = getattr(tag, 'options', {})
param_type = options.get(OPT_TYPE)
if param_type:
node.type = self._resolve_toplevel(param_type.one(),
node.type, node, parent)
caller_allocates = False
annotated_direction = None
if (OPT_INOUT in options or
OPT_INOUT_ALT in options):
annotated_direction = ast.PARAM_DIRECTION_INOUT
elif OPT_OUT in options:
subtype = options[OPT_OUT]
if subtype is not None:
subtype = subtype.one()
annotated_direction = ast.PARAM_DIRECTION_OUT
if subtype in (None, ''):
if node.type.target_giname and node.type.ctype:
target = self._transformer.lookup_giname(node.type.target_giname)
target = self._transformer.resolve_aliases(target)
has_double_indirection = '**' in node.type.ctype
is_structure_or_union = isinstance(target, (ast.Record, ast.Union))
caller_allocates = (not has_double_indirection and is_structure_or_union)
else:
caller_allocates = False
elif subtype == OPT_OUT_CALLER_ALLOCATES:
caller_allocates = True
elif subtype == OPT_OUT_CALLEE_ALLOCATES:
caller_allocates = False
elif OPT_IN in options:
annotated_direction = ast.PARAM_DIRECTION_IN
if (annotated_direction is not None) and (annotated_direction != node.direction):
node.direction = annotated_direction
node.caller_allocates = caller_allocates
# Also reset the transfer default if we're toggling direction
node.transfer = self._get_transfer_default(parent, node)
transfer_tag = options.get(OPT_TRANSFER)
if transfer_tag and transfer_tag.length() == 1:
transfer = transfer_tag.one()
if transfer == OPT_TRANSFER_FLOATING:
transfer = OPT_TRANSFER_NONE
node.transfer = transfer
self._adjust_container_type(parent, node, options)
if (OPT_ALLOW_NONE in options or
node.type.target_giname == 'Gio.AsyncReadyCallback' or
node.type.target_giname == 'Gio.Cancellable'):
node.allow_none = True
if tag is not None and tag.comment is not None:
node.doc = tag.comment
if OPT_SKIP in options:
node.skip = True
if options:
for attribute in options.getall(OPT_ATTRIBUTE):
node.attributes.append(attribute.flat())
def _apply_annotations_annotated(self, node, block):
if block is None:
return
node.doc = block.comment
since_tag = block.get(TAG_SINCE)
if since_tag is not None:
node.version = since_tag.value
deprecated_tag = block.get(TAG_DEPRECATED)
if deprecated_tag is not None:
value = deprecated_tag.value
if ': ' in value:
version, desc = value.split(': ', 1)
else:
desc = value
version = None
node.deprecated = desc
if version is not None:
node.deprecated_version = version
annos_tag = block.get(TAG_ATTRIBUTES)
if annos_tag is not None:
options = AnnotationParser.parse_options(annos_tag, annos_tag.value)
for key, value in options.iteritems():
if value:
node.attributes.append((key, value.one()))
if OPT_SKIP in block.options:
node.skip = True
if OPT_FOREIGN in block.options:
node.foreign = True
if OPT_CONSTRUCTOR in block.options and isinstance(node, ast.Function):
node.is_constructor = True
if OPT_METHOD in block.options:
node.is_method = True
def _apply_annotations_alias(self, node, chain):
block = self._get_block(node)
self._apply_annotations_annotated(node, block)
def _apply_annotations_param(self, parent, param, tag):
if tag:
options = tag.options
else:
options = {}
if isinstance(parent, (ast.Function, ast.VFunction)):
scope = options.get(OPT_SCOPE)
if scope and scope.length() == 1:
param.scope = scope.one()
param.transfer = ast.PARAM_TRANSFER_NONE
destroy = options.get(OPT_DESTROY)
if destroy:
param.destroy_name = self._get_validate_parameter_name(parent,
destroy.one(),
param)
if param.destroy_name is not None:
param.scope = ast.PARAM_SCOPE_NOTIFIED
destroy_param = parent.get_parameter(param.destroy_name)
# This is technically bogus; we're setting the scope on the destroy
# itself. But this helps avoid tripping a warning from finaltransformer,
# since we don't have a way right now to flag this callback a destroy.
destroy_param.scope = ast.PARAM_SCOPE_NOTIFIED
closure = options.get(OPT_CLOSURE)
if closure and closure.length() == 1:
param.closure_name = self._get_validate_parameter_name(parent,
closure.one(),
param)
elif isinstance(parent, ast.Callback):
if OPT_CLOSURE in options:
# For callbacks, (closure) appears without an
# argument, and tags a parameter that is a closure. We
# represent it (weirdly) in the gir and typelib by
# setting param.closure_name to itself.
param.closure_name = param.argname
self._apply_annotations_param_ret_common(parent, param, tag)
def _apply_annotations_return(self, parent, return_, block):
if block:
tag = block.get(TAG_RETURNS)
else:
tag = None
self._apply_annotations_param_ret_common(parent, return_, tag)
def _apply_annotations_params(self, parent, params, block):
declparams = set([])
if parent.instance_parameter:
declparams.add(parent.instance_parameter.argname)
for param in params:
if block:
tag = block.get(param.argname)
else:
tag = None
self._apply_annotations_param(parent, param, tag)
declparams.add(param.argname)
if not block:
return
docparams = set(block.params)
unknown = docparams - declparams
unused = declparams - docparams
for doc_name in unknown:
# Skip varargs, see #629759
if doc_name.lower() in ['...', 'varargs', TAG_RETURNS]:
continue
if len(unused) == 0:
text = ''
elif len(unused) == 1:
(param, ) = unused
text = ', should be %r' % (param, )
else:
text = ', should be one of %s' % (
', '.join(repr(p) for p in unused), )
tag = block.get(doc_name)
message.warn(
'%s: unknown parameter %r in documentation comment%s' % (
block.name, doc_name, text),
tag.position)
def _apply_annotations_callable(self, node, chain, block):
self._apply_annotations_annotated(node, block)
self._apply_annotations_params(node, node.parameters, block)
self._apply_annotations_return(node, node.retval, block)
def _check_arg_annotations(self, parent, params, block):
if block is None:
return
for tag in block.tags.keys():
if tag == TAG_RETURNS:
continue
for param in params:
if param.argname == tag:
break
else:
message.warn(
"Annotation for '%s' refers to unknown argument '%s'"
% (parent.name, tag))
def _apply_annotations_field(self, parent, block, field):
if not block:
return
tag = block.get(field.name)
if not tag:
return
t = tag.options.get(OPT_TYPE)
if t:
field.type = self._transformer.create_type_from_user_string(t.one())
try:
self._adjust_container_type(parent, field, tag.options)
except AttributeError:
pass
def _apply_annotations_property(self, parent, prop):
prefix = self._get_annotation_name(parent)
block = self._blocks.get('%s:%s' % (prefix, prop.name))
self._apply_annotations_annotated(prop, block)
if not block:
return
transfer_tag = block.get(OPT_TRANSFER)
if transfer_tag is not None:
transfer = transfer_tag.value
if transfer == OPT_TRANSFER_FLOATING:
transfer = OPT_TRANSFER_NONE
prop.transfer = transfer
else:
prop.transfer = self._get_transfer_default(parent, prop)
type_tag = block.get(TAG_TYPE)
if type_tag:
prop.type = self._resolve_toplevel(type_tag.value, prop.type, prop, parent)
def _apply_annotations_signal(self, parent, signal):
prefix = self._get_annotation_name(parent)
block = self._blocks.get('%s::%s' % (prefix, signal.name))
self._apply_annotations_annotated(signal, block)
# We're only attempting to name the signal parameters if
# the number of parameter tags (@foo) is the same or greater
# than the number of signal parameters
if block and len(block.tags) > len(signal.parameters):
names = block.tags.items()
# Resolve real parameter names early, so that in later
# phase we can refer to them while resolving annotations.
for i, param in enumerate(signal.parameters):
param.argname, tag = names[i+1]
else:
names = []
for i, param in enumerate(signal.parameters):
if names:
name, tag = names[i+1]
options = getattr(tag, 'options', {})
param_type = options.get(OPT_TYPE)
if param_type:
param.type = self._resolve_toplevel(param_type.one(), param.type,
param, parent)
else:
tag = None
self._apply_annotations_param(signal, param, tag)
self._apply_annotations_return(signal, signal.retval, block)
def _apply_annotations_constant(self, node):
block = self._blocks.get(node.ctype)
if not block:
return
tag = block.get(TAG_VALUE)
if tag:
node.value = tag.value
def _pass_read_annotations2(self, node, chain):
if isinstance(node, ast.Function):
self._apply_annotations2_function(node, chain)
return True
def _apply_annotations2_function(self, node, chain):
block = self._blocks.get(node.symbol)
self._apply_annotation_rename_to(node, chain, block)
# Handle virtual invokers
parent = chain[-1] if chain else None
if not (block and parent):
return
virtual = block.get(TAG_VFUNC)
if not virtual:
return
invoker_name = virtual.value
matched = False
for vfunc in parent.virtual_methods:
if vfunc.name == invoker_name:
matched = True
vfunc.invoker = node.name
# Also merge in annotations
self._apply_annotations_callable(vfunc, [parent], block)
break
if not matched:
message.warn_node(node,
"Virtual slot %r not found for %r annotation" % (invoker_name, TAG_VFUNC))
def _resolve_and_filter_type_list(self, typelist):
"""Given a list of Type instances, return a new list of types with
the ones that failed to resolve removed."""
# Create a copy we'll modify
new_typelist = list(typelist)
for typeval in typelist:
resolved = self._transformer.resolve_type(typeval)
if not resolved:
new_typelist.remove(typeval)
return new_typelist
def _pass_type_resolution(self, node, chain):
if isinstance(node, ast.Alias):
self._transformer.resolve_type(node.target)
if isinstance(node, ast.Callable):
for parameter in node.parameters:
self._transformer.resolve_type(parameter.type)
self._transformer.resolve_type(node.retval.type)
if isinstance(node, ast.Constant):
self._transformer.resolve_type(node.value_type)
if isinstance(node, (ast.Class, ast.Interface, ast.Record, ast.Union)):
for field in node.fields:
if field.anonymous_node:
pass
else:
self._transformer.resolve_type(field.type)
if isinstance(node, (ast.Class, ast.Interface)):
resolved_parent = None
for parent in node.parent_chain:
try:
self._transformer.resolve_type(parent)
except ValueError, e:
continue
target = self._transformer.lookup_typenode(parent)
if target:
node.parent = parent
break
else:
if isinstance(node, ast.Interface):
node.parent = ast.Type(target_giname='GObject.Object')
for prop in node.properties:
self._transformer.resolve_type(prop.type)
for sig in node.signals:
for param in sig.parameters:
self._transformer.resolve_type(param.type)
if isinstance(node, ast.Class):
node.interfaces = self._resolve_and_filter_type_list(node.interfaces)
if isinstance(node, ast.Interface):
node.prerequisites = self._resolve_and_filter_type_list(node.prerequisites)
return True
def _pair_quarks_with_enums(self):
# self._uscore_type_names is an authoritative mapping of types
# to underscored versions, since it is based on get_type() methods;
# but only covers enums that are registered as GObject enums.
# Create a fallback mapping based on all known enums in this module.
uscore_enums = {}
for enum in self._namespace.itervalues():
if not isinstance(enum, ast.Enum):
continue
type_name = enum.ctype
uscored = to_underscores(type_name).lower()
uscore_enums[uscored] = enum
try:
no_uscore_prefixed = self._transformer.strip_identifier(type_name)
except TransformerException, e:
message.warn(e)
no_uscore_prefixed = None
if no_uscore_prefixed not in uscore_enums:
uscore_enums[no_uscore_prefixed] = enum
for node in self._namespace.itervalues():
if not isinstance(node, ast.ErrorQuarkFunction):
continue
short = node.symbol[:-len('_quark')]
if short == "g_io_error":
# Special case; GIOError was already taken forcing GIOErrorEnum
assert self._namespace.name == 'Gio'
enum = self._namespace.get('IOErrorEnum')
else:
enum = self._uscore_type_names.get(short)
if enum is None:
enum = uscore_enums.get(short)
if enum is not None:
enum.error_domain = node.error_domain
else:
message.warn_node(node,
"""%s: Couldn't find corresponding enumeration""" % (node.symbol, ))
def _split_uscored_by_type(self, uscored):
"""'uscored' should be an un-prefixed uscore string. This
function searches through the namespace for the longest type which
prefixes uscored, and returns (type, suffix). Example, assuming
namespace Gtk, type is TextBuffer:
_split_uscored_by_type('text_buffer_try_new') -> (ast.Class(TextBuffer), 'try_new')"""
node = None
count = 0
prev_split_count = -1
while True:
components = uscored.rsplit('_', count)
if len(components) == prev_split_count:
return None
prev_split_count = len(components)
type_string = components[0]
node = self._uscore_type_names.get(type_string)
if node:
return (node, '_'.join(components[1:]))
count += 1
def _pair_function(self, func):
"""Check to see whether a toplevel function should be a
method or constructor of some type."""
# Ignore internal symbols and type metadata functions
if func.symbol.startswith('_') or func.is_type_meta_function():
return
(ns, subsymbol) = self._transformer.split_csymbol(func.symbol)
assert ns == self._namespace
if self._is_constructor(func, subsymbol):
self._set_up_constructor(func, subsymbol)
return
elif self._is_method(func, subsymbol):
self._setup_method(func, subsymbol)
return
elif self._pair_static_method(func, subsymbol):
return
def _uscored_identifier_for_type(self, typeval):
"""Given a Type(target_giname='Foo.BarBaz'), return 'bar_baz'."""
name = typeval.get_giname()
return to_underscores_noprefix(name).lower()
def _is_method(self, func, subsymbol):
if not func.parameters:
if func.is_method:
message.warn_node(func,
'%s: Methods must have parameters' % (func.symbol, ))
return False
first = func.parameters[0]
target = self._transformer.lookup_typenode(first.type)
if not isinstance(target, (ast.Class, ast.Interface,
ast.Record, ast.Union,
ast.Boxed)):
if func.is_method:
message.warn_node(func,
'%s: Methods must have a pointer as their first '
'parameter' % (func.symbol, ))
return False
if target.namespace != self._namespace:
if func.is_method:
message.warn_node(func,
'%s: Methods must belong to the same namespace as the '
'class they belong to' % (func.symbol, ))
return False
# A quick hack here...in the future we should catch C signature/GI signature
# mismatches in a general way in finaltransformer
if first.type.ctype is not None and first.type.ctype.count('*') > 1:
return False
if not func.is_method:
uscored_prefix = self._get_uscored_prefix(func, subsymbol)
if not subsymbol.startswith(uscored_prefix):
return False
return True
def _setup_method(self, func, subsymbol):
uscored_prefix = self._get_uscored_prefix(func, subsymbol)
target = self._transformer.lookup_typenode(func.parameters[0].type)
func.instance_parameter = func.parameters.pop(0)
self._namespace.float(func)
if not func.is_method:
subsym_idx = func.symbol.find(subsymbol)
func.name = func.symbol[(subsym_idx + len(uscored_prefix) + 1):]
func.is_method = True
target.methods.append(func)
def _get_uscored_prefix(self, func, subsymbol):
# Here we check both the c_symbol_prefix and (if that fails),
# attempt to do a default uscoring of the type. The reason we
# look at a default underscore transformation is for
# gdk_window_object_get_type(), which says to us that the
# prefix is "gdk_window_object", when really it's just
# "gdk_window". Possibly need an annotation to override this.
prefix_matches = False
uscored_prefix = None
first_arg = func.parameters[0]
target = self._transformer.lookup_typenode(first_arg.type)
if hasattr(target, 'c_symbol_prefix') and target.c_symbol_prefix is not None:
prefix_matches = subsymbol.startswith(target.c_symbol_prefix)
if prefix_matches:
uscored_prefix = target.c_symbol_prefix
if not prefix_matches:
uscored_prefix = self._uscored_identifier_for_type(first_arg.type)
return uscored_prefix
def _pair_static_method(self, func, subsymbol):
split = self._split_uscored_by_type(subsymbol)
if split is None:
return False
(node, funcname) = split
if funcname == '':
return False
if isinstance(node, ast.Class):
self._namespace.float(func)
func.name = funcname
node.static_methods.append(func)
return True
elif isinstance(node, (ast.Interface, ast.Record, ast.Union,
ast.Boxed, ast.Enum, ast.Bitfield)):
# prior to the introduction of this part of the code, only
# ast.Class could have static methods. so for backwards
# compatibility, instead of removing the func from the namespace,
# leave it there and get a copy instead. modify the copy and push
# it onto static_methods. we need to copy the parameters list
# separately, because in the third pass functions are flagged as
# 'throws' depending on the presence of a GError parameter which is
# then removed from the parameters list. without the explicit
# copy, only one of the two functions would thus get flagged as
# 'throws'. clone() does this for us.
new_func = func.clone()
new_func.name = funcname
node.static_methods.append(new_func)
# flag the func as a backwards-compatibility kludge (thus it will
# get pruned in the introspectable pass if introspectable=0).
func.moved_to = node.name + '.' + new_func.name
return True
return False
def _set_up_constructor(self, func, subsymbol):
self._namespace.float(func)
func.name = self._get_constructor_name(func, subsymbol)
origin_node = self._get_constructor_class(func, subsymbol)
origin_node.constructors.append(func)
func.is_constructor = True
# Constructors have default return semantics
if not func.retval.transfer:
func.retval.transfer = self._get_transfer_default_return(func,
func.retval)
def _get_constructor_class(self, func, subsymbol):
origin_node = None
split = self._split_uscored_by_type(subsymbol)
if split is None:
if func.is_constructor:
origin_node = self._transformer.lookup_typenode(func.retval.type)
else:
origin_node, _ = split
return origin_node
def _get_constructor_name(self, func, subsymbol):
name = None
split = self._split_uscored_by_type(subsymbol)
if split is None:
if func.is_constructor:
name = func.name
else:
_, name = split
return name
def _guess_constructor_by_name(self, symbol):
# Normal constructors, gtk_button_new etc
if symbol.endswith('_new'):
return True
# Alternative constructor, gtk_button_new_with_label
if '_new_' in symbol:
return True
# gtk_list_store_newv,gtk_tree_store_newv etc
if symbol.endswith('_newv'):
return True
return False
def _is_constructor(self, func, subsymbol):
# func.is_constructor will be True if we have a (constructor) annotation
if not func.is_constructor:
if not self._guess_constructor_by_name(func.symbol):
return False
target = self._transformer.lookup_typenode(func.retval.type)
if not (isinstance(target, ast.Class)
or (isinstance(target, (ast.Record, ast.Union, ast.Boxed))
and (target.get_type is not None or target.foreign))):
if func.is_constructor:
message.warn_node(func,
'%s: Constructors must return an instance of their class'
% (func.symbol, ))
return False
origin_node = self._get_constructor_class(func, subsymbol)
if origin_node is None:
message.warn_node(func,
"Can't find matching type for constructor; symbol=%r" \
% (func.symbol, ))
return False
# Some sanity checks; only objects and boxeds can have ctors
if not (isinstance(origin_node, ast.Class)
or (isinstance(origin_node, (ast.Record, ast.Union, ast.Boxed))
and (origin_node.get_type is not None or origin_node.foreign))):
return False
# Verify the namespace - don't want to append to foreign namespaces!
if origin_node.namespace != self._namespace:
if func.is_constructor:
message.warn_node(func,
'%s: Constructors must belong to the same namespace as the '
'class they belong to' % (func.symbol, ))
return False
# If it takes the object as a first arg, guess it's not a constructor
if not func.is_constructor and len(func.parameters) > 0:
first_arg = self._transformer.lookup_typenode(func.parameters[0].type)
if (first_arg is not None) and first_arg.gi_name == origin_node.gi_name:
return False
if isinstance(target, ast.Class):
parent = origin_node
while parent and (not parent.gi_name == 'GObject.Object'):
if parent == target:
break
if parent.parent:
parent = self._transformer.lookup_typenode(parent.parent)
else:
parent = None
if parent is None:
message.warn_node(func,
"Return value is not superclass for constructor; "
"symbol=%r constructed=%r return=%r" % (
func.symbol,
str(origin_node.create_type()),
str(func.retval.type)))
return False
else:
if origin_node != target:
message.warn_node(func,
"Constructor return type mismatch symbol=%r "
"constructed=%r return=%r" % (
func.symbol,
str(origin_node.create_type()),
str(func.retval.type)))
return False
return True
def _pair_class_virtuals(self, node):
"""Look for virtual methods from the class structure."""
if not node.glib_type_struct:
# https://bugzilla.gnome.org/show_bug.cgi?id=629080
#message.warn_node(node,
# "Failed to find class structure for %r" % (node.name, ))
return
node_type = node.create_type()
class_struct = self._transformer.lookup_typenode(node.glib_type_struct)
# Object class fields are assumed to be read-only
# (see also _introspect_object and transformer.py)
for field in class_struct.fields:
if isinstance(field, ast.Field):
field.writable = False
for field in class_struct.fields:
if not isinstance(field.anonymous_node, ast.Callback):
continue
callback = field.anonymous_node
# Check the first parameter is the object
if len(callback.parameters) == 0:
continue
firstparam_type = callback.parameters[0].type
if firstparam_type != node_type:
continue
vfunc = ast.VFunction.from_callback(callback)
vfunc.instance_parameter = callback.parameters[0]
vfunc.inherit_file_positions(callback)
prefix = self._get_annotation_name(class_struct)
block = self._blocks.get('%s::%s' % (prefix, vfunc.name))
self._apply_annotations_callable(vfunc, [node], block)
node.virtual_methods.append(vfunc)
# Take the set of virtual methods we found, and try
# to pair up with any matching methods using the
# name+signature.
for vfunc in node.virtual_methods:
for method in node.methods:
if method.name != vfunc.name:
continue
if method.retval.type != vfunc.retval.type:
continue
if len(method.parameters) != len(vfunc.parameters):
continue
for i in xrange(len(method.parameters)):
m_type = method.parameters[i].type
v_type = vfunc.parameters[i].type
if m_type != v_type:
continue
vfunc.invoker = method.name
# Apply any annotations we have from the invoker to
# the vfunc
block = self._blocks.get(method.symbol)
self._apply_annotations_callable(vfunc, [], block)
break
def _pass3(self, node, chain):
"""Pass 3 is after we've loaded GType data and performed type
closure."""
if isinstance(node, ast.Callable):
self._pass3_callable_callbacks(node)
self._pass3_callable_throws(node)
return True
def _pass3_callable_callbacks(self, node):
"""Check to see if we have anything that looks like a
callback+user_data+GDestroyNotify set."""
params = node.parameters
# First, do defaults for well-known callback types
for i, param in enumerate(params):
argnode = self._transformer.lookup_typenode(param.type)
if isinstance(argnode, ast.Callback):
if param.type.target_giname in ('Gio.AsyncReadyCallback',
'GLib.DestroyNotify'):
param.scope = ast.PARAM_SCOPE_ASYNC
param.transfer = ast.PARAM_TRANSFER_NONE
callback_param = None
for i, param in enumerate(params):
argnode = self._transformer.lookup_typenode(param.type)
is_destroynotify = False
if isinstance(argnode, ast.Callback):
if param.type.target_giname == 'GLib.DestroyNotify':
is_destroynotify = True
else:
callback_param = param
continue
if callback_param is None:
continue
if is_destroynotify:
callback_param.destroy_name = param.argname
callback_param.scope = ast.PARAM_SCOPE_NOTIFIED
callback_param.transfer = ast.PARAM_TRANSFER_NONE
elif (param.type.is_equiv(ast.TYPE_ANY) and
param.argname is not None and
param.argname.endswith('data')):
callback_param.closure_name = param.argname
def _pass3_callable_throws(self, node):
"""Check to see if we have anything that looks like a
callback+user_data+GDestroyNotify set."""
if not node.parameters:
return
last_param = node.parameters[-1]
# Checking type.name=='GLib.Error' generates false positives
# on methods that take a 'GError *'
if last_param.type.ctype == 'GError**':
node.parameters.pop()
node.throws = True
| gpl-2.0 | 2,615,259,377,961,729,500 | 41.534247 | 95 | 0.564019 | false |
jonathanmeier5/teamstore | saleor/dashboard/order/urls.py | 7 | 1645 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.order_list, name='orders'),
url(r'^(?P<order_pk>\d+)/$',
views.order_details, name='order-details'),
url(r'^(?P<order_pk>\d+)/add-note/$',
views.order_add_note, name='order-add-note'),
url(r'^(?P<order_pk>\d+)/cancel/$',
views.cancel_order, name='order-cancel'),
url(r'^(?P<order_pk>\d+)/address/(?P<address_type>billing|shipping)/$',
views.address_view, name='address-edit'),
url(r'^(?P<order_pk>\d+)/payment/(?P<payment_pk>\d+)/capture/$',
views.capture_payment, name='capture-payment'),
url(r'^(?P<order_pk>\d+)/payment/(?P<payment_pk>\d+)/release/$',
views.release_payment, name='release-payment'),
url(r'^(?P<order_pk>\d+)/payment/(?P<payment_pk>\d+)/refund/$',
views.refund_payment, name='refund-payment'),
url(r'^(?P<order_pk>\d+)/line/(?P<line_pk>\d+)/change/$',
views.orderline_change_quantity, name='orderline-change-quantity'),
url(r'^(?P<order_pk>\d+)/line/(?P<line_pk>\d+)/split/$',
views.orderline_split, name='orderline-split'),
url(r'^(?P<order_pk>\d+)/line/(?P<line_pk>\d+)/cancel/$',
views.orderline_cancel, name='orderline-cancel'),
url(r'^(?P<order_pk>\d+)/remove-voucher/$',
views.remove_order_voucher, name='order-remove-voucher'),
url(r'^(?P<order_pk>\d+)/shipment/(?P<group_pk>\d+)/ship/$',
views.ship_delivery_group, name='ship-delivery-group'),
url(r'^(?P<order_pk>\d+)/shipment/(?P<group_pk>\d+)/cancel/$',
views.cancel_delivery_group, name='cancel-delivery-group')]
| bsd-3-clause | 9,109,589,587,332,569,000 | 46 | 75 | 0.598176 | false |
bazz-erp/erpnext | erpnext/schools/doctype/assessment_result/assessment_result.py | 18 | 1392 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.model.document import Document
from erpnext.schools.api import get_grade
from erpnext.schools.api import get_assessment_details
class AssessmentResult(Document):
def validate(self):
self.grading_scale = frappe.db.get_value("Assessment Plan", self.assessment_plan, "grading_scale")
self.validate_maximum_score()
self.validate_grade()
def validate_maximum_score(self):
self.maximum_score = frappe.db.get_value("Assessment Plan", self.assessment_plan, "maximum_assessment_score")
assessment_details = get_assessment_details(self.assessment_plan)
max_scores = {}
for d in assessment_details:
max_scores.update({d.assessment_criteria: d.maximum_score})
for d in self.details:
d.maximum_score = max_scores.get(d.assessment_criteria)
if d.score > d.maximum_score:
frappe.throw(_("Score cannot be greater than Maximum Score"))
def validate_grade(self):
self.total_score = 0.0
for d in self.details:
d.grade = get_grade(self.grading_scale, (flt(d.score)/d.maximum_score)*100)
self.total_score += d.score
self.grade = get_grade(self.grading_scale, (self.total_score/self.maximum_score)*100)
| gpl-3.0 | 2,132,014,199,482,007,300 | 37.666667 | 111 | 0.744253 | false |
marratj/ansible | lib/ansible/modules/network/illumos/dladm_linkprop.py | 29 | 7824 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_linkprop
short_description: Manage link properties on Solaris/illumos systems.
description:
- Set / reset link properties on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
link:
description:
- Link interface name.
required: true
aliases: [ "nic", "interface" ]
property:
description:
- Specifies the name of the property we want to manage.
required: true
aliases: [ "name" ]
value:
description:
- Specifies the value we want to set for the link property.
required: false
temporary:
description:
- Specifies that the link property configuration is temporary. Temporary
link property configuration does not persist across reboots.
required: false
type: bool
default: false
state:
description:
- Set or reset the property value.
required: false
default: "present"
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
- name: Set 'maxbw' to 100M on e1000g1
  dladm_linkprop: name=e1000g1 property=maxbw value=100M state=present
- name: Set 'mtu' to 9000 on e1000g1
  dladm_linkprop: name=e1000g1 property=mtu value=9000
- name: Reset 'mtu' property on e1000g1
  dladm_linkprop: name=e1000g1 property=mtu state=reset
'''
RETURN = '''
property:
description: property name
returned: always
type: string
sample: mtu
state:
description: state of the target
returned: always
type: string
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: boolean
sample: True
link:
description: link name
returned: always
type: string
sample: e100g0
value:
description: property value
returned: always
type: string
sample: 9000
'''
from ansible.module_utils.basic import AnsibleModule
class LinkProp(object):
def __init__(self, module):
self.module = module
self.link = module.params['link']
self.property = module.params['property']
self.value = module.params['value']
self.temporary = module.params['temporary']
self.state = module.params['state']
self.dladm_bin = self.module.get_bin_path('dladm', True)
def property_exists(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
self.module.fail_json(msg='Unknown property "%s" on link %s' %
(self.property, self.link),
property=self.property,
link=self.link)
def property_is_modified(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('value,default')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
(value, default) = out.split(':')
if rc == 0 and value == default:
return True
else:
return False
def property_is_readonly(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('perm')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
if rc == 0 and out == 'r-':
return True
else:
return False
def property_is_set(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('value')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
if rc == 0 and self.value == out:
return True
else:
return False
def set_property(self):
cmd = [self.dladm_bin]
cmd.append('set-linkprop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property + '=' + self.value)
cmd.append(self.link)
return self.module.run_command(cmd)
def reset_property(self):
cmd = [self.dladm_bin]
cmd.append('reset-linkprop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
return self.module.run_command(cmd)
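# For reference, the argument lists assembled above correspond to commands
# such as (property and link names are examples only):
# dladm set-linkprop -t -p mtu=9000 e1000g1
# dladm reset-linkprop -t -p mtu e1000g1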
def main():
module = AnsibleModule(
argument_spec=dict(
link=dict(required=True, default=None, type='str', aliases=['nic', 'interface']),
property=dict(required=True, type='str', aliases=['name']),
value=dict(required=False, type='str'),
temporary=dict(default=False, type='bool'),
state=dict(
default='present', choices=['absent', 'present', 'reset']),
),
required_if=[
['state', 'present', ['value']],
],
supports_check_mode=True
)
linkprop = LinkProp(module)
rc = None
out = ''
err = ''
result = {}
result['property'] = linkprop.property
result['link'] = linkprop.link
result['state'] = linkprop.state
if linkprop.value:
result['value'] = linkprop.value
if linkprop.state == 'absent' or linkprop.state == 'reset':
if linkprop.property_exists():
if not linkprop.property_is_modified():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = linkprop.reset_property()
if rc != 0:
module.fail_json(property=linkprop.property,
link=linkprop.link,
msg=err,
rc=rc)
elif linkprop.state == 'present':
if linkprop.property_exists():
if not linkprop.property_is_readonly():
if not linkprop.property_is_set():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = linkprop.set_property()
if rc != 0:
module.fail_json(property=linkprop.property,
link=linkprop.link,
msg=err,
rc=rc)
else:
module.fail_json(msg='Property "%s" is read-only!' % (linkprop.property),
property=linkprop.property,
link=linkprop.link)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,472,987,972,616,592,000 | 25.879725 | 93 | 0.538737 | false |
AndyHuu/flocker | flocker/provision/_ssh/_model.py | 8 | 7061 | from collections import MutableSequence
from pipes import quote as shell_quote
from pyrsistent import PRecord, field
from effect import Effect, sync_performer
def identity(arg):
"""Return argument untouched."""
return arg
class RunRemotely(PRecord):
"""
Run some commands on a remote host.
:ivar bytes address: The address of the remote host to connect to.
:ivar bytes username: The user to connect as.
:ivar Effect commands: The commands to run.
:ivar int port: The port of the ssh server to connect to.
:ivar callable log_command_filter: A filter to apply to any logging
of the executed command.
"""
username = field(type=bytes, mandatory=True)
address = field(type=bytes, mandatory=True)
commands = field(type=Effect, mandatory=True)
port = field(type=int, initial=22)
log_command_filter = field(mandatory=True)
def run_remotely(
username, address, commands, port=22, log_command_filter=identity):
"""
Run some commands on a remote host.
:param bytes address: The address of the remote host to connect to.
:param bytes username: The user to connect as.
:param Effect commands: The commands to run.
:param int port: The port of the ssh server to connect to.
:param callable log_command_filter: A filter to apply to any logging
of the executed command.
:return Effect:
"""
return Effect(RunRemotely(
username=username, address=address, commands=commands, port=port,
log_command_filter=log_command_filter))
def _shell_join(seq):
"""
Convert a nested list of strings to a shell command.
Each string in the list is escaped as necessary to allow it to be
passed to a shell as a single word. If an item is a list, it is a
nested command, which will be escaped first, and then added as a
single word to the top-level command.
For example, ['su', 'root', '-c', ['apt-get', 'update']] becomes
"su root -c 'apt-get update'".
"""
result = []
for word in seq:
if isinstance(word, (tuple, MutableSequence)):
word = _shell_join(word)
escaped = shell_quote(word)
result.append(escaped)
return ' '.join(result)
class Run(PRecord):
"""
Run a shell command on a remote host.
:ivar bytes command: The command to run.
:ivar callable log_command_filter: A filter to apply to any logging
of the executed command.
"""
command = field(type=bytes, mandatory=True)
log_command_filter = field(mandatory=True)
@classmethod
def from_args(cls, command_args, log_command_filter=identity):
return cls(
command=_shell_join(command_args),
log_command_filter=log_command_filter)
class Sudo(PRecord):
"""
Run a shell command on a remote host with sudo.
:ivar bytes command: The command to run.
:ivar callable log_command_filter: A filter to apply to any logging
of the executed command.
"""
command = field(type=bytes, mandatory=True)
log_command_filter = field(mandatory=True)
@classmethod
def from_args(cls, command_args, log_command_filter=identity):
return cls(
command=_shell_join(command_args),
log_command_filter=log_command_filter)
@sync_performer
def perform_sudo(dispatcher, intent):
"""
Default implementation of `Sudo`.
"""
return Effect(Run(
command='sudo ' + intent.command, log_command_filter=identity))
class Put(PRecord):
"""
Create a file with the given content on a remote host.
:ivar bytes content: The desired contents.
:ivar bytes path: The remote path to create.
:ivar callable log_content_filter: A filter to apply to any logging
of the transferred content.
"""
content = field(type=bytes, mandatory=True)
path = field(type=bytes, mandatory=True)
log_content_filter = field(mandatory=True)
@sync_performer
def perform_put(dispatcher, intent):
"""
Default implementation of `Put`.
"""
def create_put_command(content, path):
return 'printf -- %s > %s' % (shell_quote(content), shell_quote(path))
return Effect(Run(
command=create_put_command(intent.content, intent.path),
log_command_filter=lambda _: create_put_command(
intent.log_content_filter(intent.content), intent.path)
))
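# Illustrative example of the command built above: for content "x=1" and
# path "/etc/flocker.cfg" (made-up values), create_put_command yields
#
#   printf -- x=1 > /etc/flocker.cfg
#
# with both arguments shell-quoted; the logged variant substitutes the
# filtered content instead.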
class Comment(PRecord):
"""
Record a comment to be shown in the documentation corresponding to a task.
:ivar bytes comment: The desired comment.
"""
comment = field(type=bytes, mandatory=True)
@sync_performer
def perform_comment(dispatcher, intent):
"""
Default implementation of `Comment`.
"""
def run(command, log_command_filter=identity):
"""
Run a shell command on a remote host.
:param bytes command: The command to run.
:param callable log_command_filter: A filter to apply to any logging
of the executed command.
"""
return Effect(Run(command=command, log_command_filter=log_command_filter))
def sudo(command, log_command_filter=identity):
"""
Run a shell command on a remote host with sudo.
:param bytes command: The command to run.
:param callable log_command_filter: A filter to apply to any logging
of the executed command.
:return Effect:
"""
return Effect(Sudo(command=command, log_command_filter=log_command_filter))
def put(content, path, log_content_filter=identity):
"""
Create a file with the given content on a remote host.
:param bytes content: The desired contents.
:param bytes path: The remote path to create.
:param callable log_content_filter: A filter to apply to any logging
of the transferred content.
:return Effect:
"""
return Effect(Put(
content=content, path=path, log_content_filter=log_content_filter))
def comment(comment):
"""
Record a comment to be shown in the documentation corresponding to a task.
:param bytes comment: The desired comment.
:return Effect:
"""
return Effect(Comment(comment=comment))
def run_from_args(command, log_command_filter=identity):
"""
Run a command on a remote host. This quotes the provided arguments, so they
are not interpreted by the shell.
:param list command: The command to run.
:param callable log_command_filter: A filter to apply to any logging
of the executed command.
:return Effect:
"""
return Effect(
Run.from_args(command, log_command_filter=log_command_filter))
def sudo_from_args(command, log_command_filter=identity):
"""
Run a command on a remote host with sudo. This quotes the provided
arguments, so they are not interpreted by the shell.
:param list command: The command to run.
:param callable log_command_filter: A filter to apply to any logging
of the executed command.
:return Effect:
"""
return Effect(
Sudo.from_args(command, log_command_filter=log_command_filter))
| apache-2.0 | 1,975,530,347,451,023,000 | 28.919492 | 79 | 0.672709 | false |
jbedorf/tensorflow | tensorflow/python/kernel_tests/diag_op_test.py | 7 | 23049 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class MatrixDiagTest(test.TestCase):
@test_util.run_deprecated_v1
def testVector(self):
with self.session(use_gpu=True):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
v_diag = array_ops.matrix_diag(v)
self.assertEqual((3, 3), v_diag.get_shape())
self.assertAllEqual(v_diag.eval(), mat)
def _testBatchVector(self, dtype):
with self.cached_session(use_gpu=True):
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]]).astype(dtype)
v_batch_diag = array_ops.matrix_diag(v_batch)
self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
self.assertAllEqual(v_batch_diag.eval(), mat_batch)
@test_util.run_deprecated_v1
def testBatchVector(self):
self._testBatchVector(np.float32)
self._testBatchVector(np.float64)
self._testBatchVector(np.int32)
self._testBatchVector(np.int64)
self._testBatchVector(np.bool)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.matrix_diag(0)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testInvalidShapeAtEval(self):
with self.session(use_gpu=True):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 1-dim"):
array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3,), (7, 4))
with self.session(use_gpu=True):
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), np.float32)
y = array_ops.matrix_diag(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
class MatrixSetDiagTest(test.TestCase):
@test_util.run_deprecated_v1
def testSquare(self):
with self.session(use_gpu=True):
v = np.array([1.0, 2.0, 3.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
mat_set_diag = np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0],
[1.0, 1.0, 3.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag, self.evaluate(output))
@test_util.run_deprecated_v1
def testRectangular(self):
with self.session(use_gpu=True):
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((2, 3), output.get_shape())
self.assertAllEqual(expected, self.evaluate(output))
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 2), output.get_shape())
self.assertAllEqual(expected, self.evaluate(output))
def _testSquareBatch(self, dtype):
with self.cached_session(use_gpu=True):
v_batch = np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0],
[2.0, 0.0, 6.0]]]).astype(dtype)
mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0],
[1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0],
[2.0, 0.0, -6.0]]]).astype(dtype)
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
@test_util.run_deprecated_v1
def testSquareBatch(self):
self._testSquareBatch(np.float32)
self._testSquareBatch(np.float64)
self._testSquareBatch(np.int32)
self._testSquareBatch(np.int64)
self._testSquareBatch(np.bool)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
with self.session(use_gpu=True):
v_batch = np.array([[-1.0, -2.0], [-4.0, -5.0]])
mat_batch = np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]])
mat_set_diag_batch = np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 2, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
array_ops.matrix_set_diag(0, [0])
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.matrix_set_diag([[0]], 0)
@test_util.run_deprecated_v1
def testInvalidShapeAtEval(self):
with self.session(use_gpu=True):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
with self.assertRaisesOpError(
r"but received input shape: \[1,1\] and diagonal shape: \[\]"):
array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
with self.session(use_gpu=True):
for shape in shapes:
x = constant_op.constant(
np.random.rand(*shape), dtype=dtypes_lib.float32)
diag_shape = shape[:-2] + (min(shape[-2:]),)
x_diag = constant_op.constant(
np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
y = array_ops.matrix_set_diag(x, x_diag)
error_x = gradient_checker.compute_gradient_error(
x,
x.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error_x, 1e-4)
error_x_diag = gradient_checker.compute_gradient_error(
x_diag,
x_diag.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error_x_diag, 1e-4)
@test_util.run_deprecated_v1
def testGradWithNoShapeInformation(self):
with self.session(use_gpu=True) as sess:
v = array_ops.placeholder(dtype=dtypes_lib.float32)
mat = array_ops.placeholder(dtype=dtypes_lib.float32)
grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
output = array_ops.matrix_set_diag(mat, v)
grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
grad_input_val = np.random.rand(3, 3).astype(np.float32)
grad_vals = sess.run(
grads,
feed_dict={
v: 2 * np.ones(3),
mat: np.ones((3, 3)),
grad_input: grad_input_val
})
self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
grad_vals[0])
class MatrixDiagPartTest(test.TestCase):
@test_util.run_deprecated_v1
def testSquare(self):
with self.session(use_gpu=True):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
mat_diag = array_ops.matrix_diag_part(mat)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag.eval(), v)
@test_util.run_deprecated_v1
def testRectangular(self):
with self.session(use_gpu=True):
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
def _testSquareBatch(self, dtype):
with self.cached_session(use_gpu=True):
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]]).astype(dtype)
self.assertEqual(mat_batch.shape, (2, 3, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 3), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
@test_util.run_deprecated_v1
def testSquareBatch(self):
self._testSquareBatch(np.float32)
self._testSquareBatch(np.float64)
self._testSquareBatch(np.int32)
self._testSquareBatch(np.int64)
self._testSquareBatch(np.bool)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
with self.session(use_gpu=True):
v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
self.assertEqual(mat_batch.shape, (2, 2, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 2), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
array_ops.matrix_diag_part(0)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123337890") # Error messages differ
def testInvalidShapeAtEval(self):
with self.session(use_gpu=True):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
with self.session(use_gpu=True):
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
y = array_ops.matrix_diag_part(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
class DiagTest(test.TestCase):
def _diagOp(self, diag, dtype, expected_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
out = self.evaluate(tf_ans)
tf_ans_inv = array_ops.diag_part(expected_ans)
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(out, expected_ans)
self.assertAllClose(inv_out, diag)
self.assertShapeEqual(expected_ans, tf_ans)
self.assertShapeEqual(diag, tf_ans_inv)
def diagOp(self, diag, dtype, expected_ans):
self._diagOp(diag, dtype, expected_ans, False)
self._diagOp(diag, dtype, expected_ans, True)
def testEmptyTensor(self):
x = np.array([])
expected_ans = np.empty([0, 0])
self.diagOp(x, np.int32, expected_ans)
def testRankOneIntTensor(self):
x = np.array([1, 2, 3])
expected_ans = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankOneFloatTensor(self):
x = np.array([1.1, 2.2, 3.3])
expected_ans = np.array([[1.1, 0, 0], [0, 2.2, 0], [0, 0, 3.3]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankOneComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], dtype=dtype)
expected_ans = np.array(
[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 2.2 + 2.2j, 0 + 0j],
[0 + 0j, 0 + 0j, 3.3 + 3.3j]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankTwoIntTensor(self):
x = np.array([[1, 2, 3], [4, 5, 6]])
expected_ans = np.array([[[[1, 0, 0], [0, 0, 0]], [[0, 2, 0], [0, 0, 0]],
[[0, 0, 3], [0, 0, 0]]],
[[[0, 0, 0], [4, 0, 0]], [[0, 0, 0], [0, 5, 0]],
[[0, 0, 0], [0, 0, 6]]]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankTwoFloatTensor(self):
x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
expected_ans = np.array(
[[[[1.1, 0, 0], [0, 0, 0]], [[0, 2.2, 0], [0, 0, 0]],
[[0, 0, 3.3], [0, 0, 0]]], [[[0, 0, 0], [4.4, 0, 0]],
[[0, 0, 0], [0, 5.5, 0]], [[0, 0, 0],
[0, 0, 6.6]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankTwoComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array(
[[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j],
[4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]],
dtype=dtype)
expected_ans = np.array(
[[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]], [
[0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]
], [[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]], [[
[0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]
], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]
], [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankThreeFloatTensor(self):
x = np.array([[[1.1, 2.2], [3.3, 4.4]], [[5.5, 6.6], [7.7, 8.8]]])
expected_ans = np.array([[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
[[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
[[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
[[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
[[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
[[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
[[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
[[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankThreeComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array(
[[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]],
[[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]],
dtype=dtype)
expected_ans = np.array(
[[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]], [[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]]], [[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]], [[0 + 0j, 0 + 0j], [
0 + 0j, 0 + 0j
]]]]], [[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [
[5.5 + 5.5j, 0 + 0j], [0 + 0j, 0 + 0j]
]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 6.6 + 6.6j], [
0 + 0j, 0 + 0j
]]]], [[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]], [[0 + 0j, 0 + 0j], [
7.7 + 7.7j, 0 + 0j
]]], [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankFourNumberTensor(self):
for dtype in [np.float32, np.float64, np.int64, np.int32]:
# Input with shape [2, 1, 2, 3]
x = np.array(
[[[[1, 2, 3], [4, 5, 6]]], [[[7, 8, 9], [10, 11, 12]]]], dtype=dtype)
# Output with shape [2, 1, 2, 3, 2, 1, 2, 3]
expected_ans = np.array(
[[[[[[[[1, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [
[[[0, 2, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]
], [[[[0, 0, 3], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]]], [[
[[[0, 0, 0], [4, 0, 0]]], [[[0, 0, 0], [0, 0, 0]]]
], [[[[0, 0, 0], [0, 5, 0]]], [[[0, 0, 0], [0, 0, 0]]]], [
[[[0, 0, 0], [0, 0, 6]]], [[[0, 0, 0], [0, 0, 0]]]
]]]], [[[[[[[0, 0, 0], [0, 0, 0]]], [[[7, 0, 0], [0, 0, 0]]]], [
[[[0, 0, 0], [0, 0, 0]]], [[[0, 8, 0], [0, 0, 0]]]
], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 9], [0, 0, 0]]]]], [[
[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [10, 0, 0]]]
], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 11, 0]]]
], [[[[0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 12]]]]]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
@test_util.run_deprecated_v1
def testInvalidRank(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.diag(0.0)
class DiagPartOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
def _diagPartOp(self, tensor, dtype, expected_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tensor = ops.convert_to_tensor(tensor.astype(dtype))
tf_ans_inv = array_ops.diag_part(tensor)
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
def diagPartOp(self, tensor, dtype, expected_ans):
self._diagPartOp(tensor, dtype, expected_ans, False)
self._diagPartOp(tensor, dtype, expected_ans, True)
def testRankTwoFloatTensor(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankFourFloatTensorUnknownShape(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
for shape in None, (None, 3), (3, None):
with self.cached_session(use_gpu=False):
t = ops.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
tf_ans = array_ops.diag_part(t)
out = self.evaluate(tf_ans)
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)
def testRankFourFloatTensor(self):
x = np.random.rand(2, 3, 2, 3)
i = np.arange(2)[:, None]
j = np.arange(3)
expected_ans = x[i, j, i, j]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankSixFloatTensor(self):
x = np.random.rand(2, 2, 2, 2, 2, 2)
i = np.arange(2)[:, None, None]
j = np.arange(2)[:, None]
k = np.arange(2)
expected_ans = x[i, j, k, i, j, k]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankEightComplexTensor(self):
x = np.random.rand(2, 2, 2, 3, 2, 2, 2, 3)
i = np.arange(2)[:, None, None, None]
j = np.arange(2)[:, None, None]
k = np.arange(2)[:, None]
l = np.arange(3)
expected_ans = x[i, j, k, l, i, j, k, l]
self.diagPartOp(x, np.complex64, expected_ans)
self.diagPartOp(x, np.complex128, expected_ans)
@test_util.run_deprecated_v1
def testOddRank(self):
w = np.random.rand(2)
x = np.random.rand(2, 2, 2)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
with self.assertRaises(ValueError):
array_ops.diag_part(0.0)
@test_util.run_deprecated_v1
def testUnevenDimensions(self):
w = np.random.rand(2, 5)
x = np.random.rand(2, 1, 2, 3)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
class DiagGradOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testDiagGrad(self):
np.random.seed(0)
shapes = ((3,), (3, 3), (3, 3, 3))
dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
y = array_ops.diag(x1)
error = gradient_checker.compute_gradient_error(
x1,
x1.get_shape().as_list(), y,
y.get_shape().as_list())
tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
class DiagGradPartOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testDiagPartGrad(self):
np.random.seed(0)
shapes = ((3, 3), (3, 3, 3, 3))
dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
y = array_ops.diag_part(x1)
error = gradient_checker.compute_gradient_error(
x1,
x1.get_shape().as_list(), y,
y.get_shape().as_list())
tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
| apache-2.0 | -55,699,534,788,896,450 | 39.939609 | 80 | 0.536986 | false |
compston/TAP-Workshop | utilities/Gnip-Python-Search-API-Utilities/gnip_search.py | 1 | 11170 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Scott Hendrickson, Josh Montague"
import sys
import json
import codecs
import argparse
import datetime
import time
import os
import ConfigParser
from search.results import *
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
DEFAULT_CONFIG_FILENAME = "./.gnip"
class GnipSearchCMD():
USE_CASES = ["json", "wordcount","users", "rate", "links", "timeline", "geo", "audience"]
def __init__(self, token_list_size=40):
# default tokenizer and character limit
char_upper_cutoff = 20 # longer than for normal words because of user names
self.token_list_size = int(token_list_size)
#############################################
# CONFIG FILE/COMMAND LINE OPTIONS PATTERN
# parse config file
config_from_file = self.config_file()
# set required fields to None. Sequence of setting is:
# (1) config file
# (2) command line
# if still none, then fail
self.user = None
self.password = None
self.stream_url = None
if config_from_file is not None:
try:
                # command line options take precedence if they exist
self.user = config_from_file.get('creds', 'un')
self.password = config_from_file.get('creds', 'pwd')
self.stream_url = config_from_file.get('endpoint', 'url')
except (ConfigParser.NoOptionError,
ConfigParser.NoSectionError) as e:
                print >> sys.stderr, "Error reading configuration file ({}); ignoring it.".format(e)
# parse the command line options
self.options = self.args().parse_args()
self.options.filter = self.options.filter.decode("utf-8")
# set up the job
# over ride config file with command line args if present
if self.options.user is not None:
self.user = self.options.user
if self.options.password is not None:
self.password = self.options.password
if self.options.stream_url is not None:
self.stream_url = self.options.stream_url
#
# Search v2 uses a different url
if "data-api.twitter.com" in self.stream_url:
self.options.search_v2 = True
elif self.options.search_v2:
print >> sys.stderr, "WARNING: You set the search v2 flag, but your URL appears to point to a v1 endpoint."
self.options.search_v2 = False
def config_file(self):
config = ConfigParser.ConfigParser()
        # (1) default file name takes precedence
config.read(DEFAULT_CONFIG_FILENAME)
if not config.has_section("creds"):
# (2) environment variable file name second
if 'GNIP_CONFIG_FILE' in os.environ:
config_filename = os.environ['GNIP_CONFIG_FILE']
config.read(config_filename)
if config.has_section("creds") and config.has_section("endpoint"):
return config
else:
return None
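    # Expected layout of the configuration file (illustrative; the section
    # and option names below are inferred from the get() calls in __init__):
    #
    #   [creds]
    #   un = <username>
    #   pwd = <password>
    #
    #   [endpoint]
    #   url = <Gnip search endpoint url>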
def args(self):
twitter_parser = argparse.ArgumentParser(
description="GnipSearch supports the following use cases: %s"%str(self.USE_CASES))
twitter_parser.add_argument("use_case", metavar= "USE_CASE", choices=self.USE_CASES,
help="Use case for this search.")
twitter_parser.add_argument("-a", "--paged", dest="paged", action="store_true",
default=False, help="Paged access to ALL available results (Warning: this makes many requests)")
twitter_parser.add_argument("-c", "--csv", dest="csv_flag", action="store_true",
default=False,
help="Return comma-separated 'date,counts' or geo data.")
twitter_parser.add_argument("-b", "--bucket", dest="count_bucket",
default="day",
help="Bucket size for counts query. Options are day, hour, minute (default is 'day').")
twitter_parser.add_argument("-e", "--end-date", dest="end",
default=None,
help="End of datetime window, format 'YYYY-mm-DDTHH:MM' (default: most recent activities)")
twitter_parser.add_argument("-f", "--filter", dest="filter", default="from:jrmontag OR from:gnip",
help="PowerTrack filter rule (See: http://support.gnip.com/customer/portal/articles/901152-powertrack-operators)")
twitter_parser.add_argument("-l", "--stream-url", dest="stream_url",
default=None,
help="Url of search endpoint. (See your Gnip console.)")
twitter_parser.add_argument("-n", "--results-max", dest="max", default=100,
help="Maximum results to return (default 100)")
twitter_parser.add_argument("-p", "--password", dest="password", default=None,
help="Password")
twitter_parser.add_argument("-q", "--query", dest="query", action="store_true",
default=False, help="View API query (no data)")
twitter_parser.add_argument("-s", "--start-date", dest="start",
default=None,
help="Start of datetime window, format 'YYYY-mm-DDTHH:MM' (default: 30 days ago)")
twitter_parser.add_argument("-u", "--user-name", dest="user", default=None,
help="User name")
twitter_parser.add_argument("-w", "--output-file-path", dest="output_file_path", default=None,
help="Create files in ./OUTPUT-FILE-PATH. This path must exists and will not be created. This options is available only with -a option. Default is no output files.")
# depricated... leave in for compatibility
twitter_parser.add_argument("-t", "--search-v2", dest="search_v2", action="store_true",
default=False,
help="Using search API v2 endpoint. [This is depricated and is automatically set based on endpoint.]")
return twitter_parser
def get_result(self):
WIDTH = 80
BIG_COLUMN = 32
res = [u"-"*WIDTH]
if self.options.use_case.startswith("time"):
self.results = Results(
self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.filter
, max_results=int(self.options.max)
, start=self.options.start
, end=self.options.end
, count_bucket=self.options.count_bucket
, show_query=self.options.query
, search_v2=self.options.search_v2
)
res = []
if self.options.csv_flag:
for x in self.results.get_time_series():
res.append("{:%Y-%m-%dT%H:%M:%S},{},{}".format(x[2], x[0], x[1]))
else:
res = [x for x in self.results.get_activities()]
return '{"results":' + json.dumps(res) + "}"
else:
self.results = Results(
self.user
, self.password
, self.stream_url
, self.options.paged
, self.options.output_file_path
, pt_filter=self.options.filter
, max_results=int(self.options.max)
, start=self.options.start
, end=self.options.end
, count_bucket=None
, show_query=self.options.query
, search_v2=self.options.search_v2
)
if self.options.use_case.startswith("rate"):
rate = self.results.query.get_rate()
unit = "Tweets/Minute"
if rate < 0.01:
rate *= 60.
unit = "Tweets/Hour"
res.append(" PowerTrack Rule: \"%s\""%self.options.filter)
res.append(" Oldest Tweet (UTC): %s"%str(self.results.query.oldest_t))
res.append(" Newest Tweet (UTC): %s"%str(self.results.query.newest_t))
res.append(" Now (UTC): %s"%str(datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")))
res.append(" %5d Tweets: %6.3f %s"%(len(self.results), rate, unit))
res.append("-"*WIDTH)
elif self.options.use_case.startswith("geo"):
res = []
for x in self.results.get_geo():
if self.options.csv_flag:
try:
res.append("{},{},{},{}".format(x["id"], x["postedTime"], x["longitude"], x["latitude"]))
except KeyError, e:
print >> sys.stderr, str(e)
else:
res.append(json.dumps(x))
elif self.options.use_case.startswith("json"):
res = [json.dumps(x) for x in self.results.get_activities()]
if self.options.csv_flag:
res = ["|".join(x) for x in self.results.query.get_list_set()]
elif self.options.use_case.startswith("word"):
fmt_str = u"%{}s -- %10s %8s ".format(BIG_COLUMN)
res.append(fmt_str%( "terms", "mentions", "activities"))
res.append("-"*WIDTH)
fmt_str = u"%{}s -- %4d %5.2f%% %4d %5.2f%%".format(BIG_COLUMN)
for x in self.results.get_top_grams(n=self.token_list_size):
res.append(fmt_str%(x[4], x[0], x[1]*100., x[2], x[3]*100.))
res.append(" TOTAL: %d activities"%len(self.results))
res.append("-"*WIDTH)
elif self.options.use_case.startswith("user"):
fmt_str = u"%{}s -- %10s %8s ".format(BIG_COLUMN)
res.append(fmt_str%( "terms", "mentions", "activities"))
res.append("-"*WIDTH)
fmt_str = u"%{}s -- %4d %5.2f%% %4d %5.2f%%".format(BIG_COLUMN)
for x in self.results.get_top_users(n=self.token_list_size):
res.append(fmt_str%(x[4], x[0], x[1]*100., x[2], x[3]*100.))
res.append(" TOTAL: %d activities"%len(self.results))
res.append("-"*WIDTH)
elif self.options.use_case.startswith("link"):
res[-1]+=u"-"*WIDTH
res.append(u"%100s -- %10s %8s (%d)"%("links", "mentions", "activities", len(self.results)))
res.append("-"*2*WIDTH)
for x in self.results.get_top_links(n=self.token_list_size):
res.append(u"%100s -- %4d %5.2f%% %4d %5.2f%%"%(x[4], x[0], x[1]*100., x[2], x[3]*100.))
res.append("-"*WIDTH)
elif self.options.use_case.startswith("audie"):
for x in self.results.get_users():
res.append(u"{}".format(x))
res.append("-"*WIDTH)
return u"\n".join(res)
if __name__ == "__main__":
g = GnipSearchCMD()
print unicode(g.get_result())
| mit | 4,436,343,099,066,533,000 | 48.866071 | 181 | 0.537959 | false |
Pytlicek/MailgunMailer | app/models/Newsletters.py | 1 | 2753 | from flask import session
from app.log import get_logger
from app.models.SQL_DB import Newsletter, db
logger = get_logger(__name__)
def user_newsletters(email):
newsletters = Newsletter.query.filter_by(username=email).all()
return newsletters
def add_newsletter(username, subject, message, recipients, sender, tags, campaign):
newsletter = Newsletter(
None,
username,
recipients,
message,
None,
None,
0,
subject,
sender,
tags,
campaign,
)
db.session.add(newsletter)
return db.session.commit()
def edit_add_newsletter(
username, subject, message, recipients, sender, tags, campaign, newsletter_id
):
newsletter = Newsletter.query.filter_by(username=username, id=newsletter_id).first()
newsletter.subject = subject
newsletter.message = message
newsletter.recipients = recipients
newsletter.sender = sender
newsletter.tags = tags
newsletter.campaign = campaign
return db.session.commit()
def send_newsletter(newsletter_id):
from app.models.Mailgun_Internal import mailgun_send_newsletter
username = session["username"]
mg_api_private = session["mg_api_private"]
mg_domain = session["mg_domain"]
newsletter = Newsletter.query.filter_by(username=username, id=newsletter_id).first()
sender = newsletter.sender
recipients = newsletter.recipients
subject = newsletter.subject
message = newsletter.message
tags = newsletter.tags
campaign = newsletter.campaign
response = mailgun_send_newsletter(
sender, recipients, subject, message, tags, campaign, mg_api_private, mg_domain
)
if response.status_code in [200, 201]:
return response.json()
else:
e = Exception(f"Mailgun returned response status code {response.status_code}")
logger.exception(e)
raise e
def delete_newsletter(newsletter_id):
username = session["username"]
newsletter = Newsletter.query.filter_by(username=username, id=newsletter_id).first()
db.session.delete(newsletter)
return db.session.commit()
def edit_newsletter(newsletter_id):
username = session["username"]
newsletter_data = Newsletter.query.filter_by(
username=username, id=newsletter_id
).first()
return newsletter_data
def update_newsletter_status(mailgun_response, newsletter_id):
if "Queued. Thank you." in mailgun_response["message"]:
queued = True
else:
queued = False
if queued:
mg_id = mailgun_response["id"]
newsletter = Newsletter.query.filter_by(id=newsletter_id).first()
newsletter.mg_status = True
newsletter.mg_id = mg_id
return db.session.commit()
| mit | 2,927,432,413,659,847,700 | 27.978947 | 88 | 0.678533 | false |
Kjili/analysis-preservation.cern.ch | tests/unit/fixtures/test_fixtures_cli.py | 3 | 2099 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CERN Analysis Preservation fixtures CLI test cases."""
from __future__ import absolute_import, print_function
from click.testing import CliRunner
from invenio_pages.models import Page
from cap.modules.fixtures.cli import loadpages_cli
def test_loadpages(script_info, db):
"""Test version import."""
assert Page.query.count() == 0
runner = CliRunner()
res = runner.invoke(loadpages_cli, [], obj=script_info)
assert res.exit_code == 0
assert Page.query.count() == 2
page = Page.query.filter_by(url='/about').one()
assert page.title == 'About Cap'
assert len(page.description) > 20
assert len(page.content) > 100
assert page.template_name == 'invenio_pages/dynamic.html'
res = runner.invoke(loadpages_cli, [], obj=script_info)
assert res.exit_code != 0
res = runner.invoke(loadpages_cli, ['-f'], obj=script_info)
assert res.exit_code == 0
for p in Page.query.all():
assert p.title
assert p.url
assert p.template_name
| gpl-2.0 | -6,740,100,909,940,735,000 | 37.163636 | 78 | 0.722249 | false |
janusnic/ecommerce | ecommerce/extensions/dashboard/users/views.py | 1 | 1719 | import logging
from django.conf import settings
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from oscar.apps.dashboard.users.views import UserDetailView as CoreUserDetailView
import requests
import waffle
logger = logging.getLogger(__name__)
class UserDetailView(CoreUserDetailView):
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
if waffle.switch_is_active('user_enrollments_on_dashboard'):
context['enrollments'] = self._get_enrollments()
return context
def _get_enrollments(self):
"""Retrieve the enrollments for the User being viewed."""
username = self.object.username
try:
url = '{}?user={}'.format(settings.ENROLLMENT_API_URL, username)
timeout = settings.ENROLLMENT_FULFILLMENT_TIMEOUT
headers = {
'Content-Type': 'application/json',
'X-Edx-Api-Key': settings.EDX_API_KEY
}
response = requests.get(url, headers=headers, timeout=timeout)
status_code = response.status_code
if status_code == 200:
return response.json()
else:
logger.warning(u'Failed to retrieve enrollments for [%s]. Enrollment API returned status code [%d].',
username, status_code)
except Exception: # pylint: disable=broad-except
logger.exception(u'An unexpected error occurred while retrieving enrollments for [%s].', username)
messages.add_message(self.request, messages.ERROR, _(u'Failed to retrieve enrollment data.'))
return []
| agpl-3.0 | 5,146,681,864,673,447,000 | 36.369565 | 117 | 0.640489 | false |
Naarukarmic/itr | SA_BE_Manipulation/grasp_ball_in_box.py | 1 | 5048 | from math import sqrt
from hpp import Transform
from hpp.corbaserver.manipulation import ConstraintGraph
from manipulation import robot, vf, ps, Ground, Box, Pokeball, PathPlayer, gripperName, ballName
vf.loadEnvironmentModel (Ground, 'ground')
vf.loadEnvironmentModel (Box, 'box')
vf.moveObstacle ('box/base_link_0', [0.3+0.04, 0, 0.04, 0, 0, 0, 1])
vf.moveObstacle ('box/base_link_1', [0.3-0.04, 0, 0.04, 0, 0, 0, 1])
vf.moveObstacle ('box/base_link_2', [0.3, 0.04, 0.04, 0, 0, 0, 1])
vf.moveObstacle ('box/base_link_3', [0.3, -0.04, 0.04, 0, 0, 0, 1])
vf.loadObjectModel (Pokeball, 'pokeball')
robot.setJointBounds ('pokeball/root_joint', [-.4,.4,-.4,.4,-.1,1.,
-1.0001, 1.0001,-1.0001, 1.0001,
-1.0001, 1.0001,-1.0001, 1.0001,])
r = vf.createViewer ()
q1 = [0, -1.57, 1.57, 0, 0, 0, .3, 0, 0.025, 0, 0, 0, 1]
r (q1)
## Create graph
graph = ConstraintGraph (robot, 'graph')
## Create constraint of relative position of the ball in the gripper when ball
## is grasped
ballInGripper = [0, .137, 0, 0.5, 0.5, -0.5, 0.5]
ps.createTransformationConstraint ('grasp', gripperName, ballName,
ballInGripper, 6*[True,])
## Create nodes and edges
# Warning: the order of the nodes is important. When checking in which node
# a configuration lies, node constraints will be checked in the order of node
# creation.
graph.createNode (['grasp-placement', 'gripper-above-ball', 'placement', 'ball-above-ground', 'grasp'])
graph.createEdge ('placement', 'placement', 'transit', 1, 'placement')
graph.createEdge ('placement', 'gripper-above-ball', 'approach-ball', 1, 'placement')
graph.createEdge ('gripper-above-ball', 'placement', 'move-gripper-away', 1, 'placement')
graph.createEdge ('gripper-above-ball', 'grasp-placement', 'grasp-ball', 1, 'placement')
graph.createEdge ('grasp-placement', 'gripper-above-ball', 'move-gripper-up', 1, 'placement')
graph.createEdge ('grasp', 'grasp', 'transfer', 1, 'grasp')
graph.createEdge ('grasp-placement', 'ball-above-ground', 'take-ball-up', 1, 'grasp')
graph.createEdge ('ball-above-ground', 'grasp-placement', 'put-ball-down', 1, 'grasp')
graph.createEdge ('ball-above-ground', 'grasp', 'take-ball-away', 1, 'grasp')
graph.createEdge ('grasp', 'ball-above-ground', 'approach-ground', 1, 'grasp')
## Create transformation constraint : ball is in horizontal plane with free
## rotation around z
ps.createTransformationConstraint ('placement', '', ballName, [0,0,0.025,0, 0, 0, 1], [False, False, True, True, True, False,])
# Create complement constraint
ps.createTransformationConstraint ('placement/complement', '', ballName, [0,0,0.025,0, 0, 0, 1], [True, True, False, False, False, True,])
ps.createTransformationConstraint ('above-ball', gripperName, ballName, [0, 0.237, 0, 0.5, 0.5, -0.5, 0.5], [True, True, True, True, True, True,])
ps.createTransformationConstraint ('above-ground', '', ballName, [0,0,0.2,0, 0, 0, 1], [False, False, True, True, True, False,])
ps.createTransformationConstraint('z-only', '', gripperName, [0, 0, 0.2, 0, 0, 0, 1], [True, True, False, True, True, True])
ps.setConstantRightHandSide ('placement', True)
ps.setConstantRightHandSide ('above-ball', True)
ps.setConstantRightHandSide ('above-ground', True)
ps.setConstantRightHandSide ('placement/complement', False)
ps.setConstantRightHandSide ('z-only', False)
## Set constraints of nodes and edges
graph.setConstraints (node='placement', numConstraints = ['placement'])
graph.setConstraints (node='gripper-above-ball', numConstraints = ['placement','above-ball'])
graph.setConstraints (node='grasp-placement', numConstraints = ['grasp', 'placement'])
graph.setConstraints (node='ball-above-ground', numConstraints = ['grasp', 'above-ground'])
graph.setConstraints (node='grasp', numConstraints = ['grasp'])
graph.setConstraints (edge='transit', numConstraints = ['placement/complement'])
graph.setConstraints (edge='approach-ball', numConstraints = ['placement/complement'])
graph.setConstraints (edge='move-gripper-up', numConstraints = ['placement/complement'])
graph.setConstraints (edge='move-gripper-away', numConstraints = ['placement/complement'])
graph.setConstraints (edge='grasp-ball', numConstraints = ['z-only'])
graph.setConstraints (edge='take-ball-up', numConstraints = ['z-only', 'grasp'])
graph.setConstraints (edge='put-ball-down', numConstraints = ['z-only', 'grasp'])
graph.setConstraints (edge='take-ball-away', numConstraints = [])
graph.setConstraints (edge='transfer', numConstraints = [])
graph.setConstraints (edge='approach-ground', numConstraints = [])
res, q_init, error = graph.applyNodeConstraints ('placement', q1)
q2 = q1 [::]
q2 [7] = .2
res, q_goal, error = graph.applyNodeConstraints ('placement', q2)
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
ps.selectPathValidation ("Discretized", 0.01)
ps.selectPathProjector ("Progressive", 0.1)
pp = PathPlayer (ps.client.basic, r)
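# From here a path between q_init and q_goal is typically computed and
# replayed (standard hpp workflow; commented out since solving can take
# a while):
# ps.solve()
# pp (0)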
| mit | -131,080,501,755,695,420 | 49.48 | 146 | 0.696117 | false |
blagasz/python-ann | neurolab/train/gd.py | 1 | 11660 | # -*- coding: utf-8 -*-
"""
Train algorithms based on gradient descent
"""
import numpy as np
from neurolab.core import Train
import neurolab.tool as tool
class TrainGD(Train):
"""
    Gradient descent backpropagation
:Support networks:
newff (multi-layers perceptron)
:Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.01)
learning rate
adapt bool (default False)
type of learning
rr float (defaults 0.0)
Regularization ratio
Must be between {0, 1}
"""
def __init__(self, net, input, target, lr=0.01, adapt=False, rr=0.0):
self.adapt = adapt
self.lr = lr
self.rr = rr
def __call__(self, net, input, target):
if not self.adapt:
while True:
g, output = self.calc(net, input, target)
# regularization grad
if self.rr > 0:
g = tool.reg_grad(g, net, self.rr)
e = self.error(net, input, target, output)
# regularization error
if self.rr:
e = tool.reg_error(e, net, self.rr)
self.epochf(e, net, input, target)
self.learn(net, g)
else:
while True:
for i in range(input.shape[0]):
g = self.calc(net, [input[i]], [target[i]])[0]
# regularization grad
if self.rr > 0:
g = tool.reg_grad(g, net, self.rr)
self.learn(net, g)
e = self.error(net, input, target)
# regularization error
if self.rr:
                    e = tool.reg_error(e, net, self.rr)
self.epochf(e, net, input, target)
return None
def calc(self, net, input, target):
g1, g2, output = tool.ff_grad(net, input, target)
return g1, output
def learn(self, net, grad):
for ln, layer in enumerate(net.layers):
layer.np['w'] -= self.lr * grad[ln]['w']
layer.np['b'] -= self.lr * grad[ln]['b']
return None
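# Minimal usage sketch (illustrative only; assumes neurolab's standard
# newff/train workflow, where this trainer is selected via net.trainf):
#
#   import neurolab as nl
#   net = nl.net.newff([[-1, 1]], [5, 1])
#   net.trainf = nl.train.train_gd
#   err = net.train(inp, tar, epochs=500, show=100, goal=0.01, lr=0.01)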
class TrainGD2(TrainGD):
"""
Gradient descent backpropagation
(another realization of TrainGD)
:Support networks:
newff (multi-layers perceptron)
:Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.01)
learning rate
adapt bool (default False)
type of learning
"""
def __init__(self, net, input, target, lr=0.01, adapt=False):
self.adapt = adapt
self.lr = lr
self.x = tool.np_get_ref(net)
        # Regularization not supported
self.rr = 0
def calc(self, net, input, target):
g1, g2, output = tool.ff_grad(net, input, target)
return g2, output
def learn(self, net, grad):
self.x -= self.lr * grad
class TrainGDM(TrainGD):
"""
Gradient descent with momentum backpropagation
:Support networks:
newff (multi-layers perceptron)
:Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.01)
learning rate
adapt bool (default False)
type of learning
mc: float (default 0.9)
Momentum constant
rr float (defaults 0.0)
Regularization ratio
Must be between {0, 1}
"""
def __init__(self, net, input, target,
lr=0.01, adapt=False, mc=0.9, rr=.0):
super(TrainGDM, self).__init__(net, input, target, lr, adapt, rr)
self.mc = mc
self.dw = [0] * len(net.layers)
self.db = [0] * len(net.layers)
def learn(self, net, grad):
# print 'GDM.learn'
mc = self.mc
lr = self.lr
for ln, layer in enumerate(net.layers):
self.dw[ln] = mc * self.dw[ln] + ((1 - mc) * lr) * grad[ln]['w']
self.db[ln] = mc * self.db[ln] + ((1 - mc) * lr) * grad[ln]['b']
layer.np['w'] -= self.dw[ln]
layer.np['b'] -= self.db[ln]
return None
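# Illustrative momentum update: with mc=0.9 the weight step is an
# exponential moving average, dw = 0.9 * dw_prev + (0.1 * lr) * grad,
# so consecutive gradients pointing the same way accumulate speed.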
class TrainGDA(TrainGD):
"""
Gradient descent with adaptive learning rate
:Support networks:
newff (multi-layers perceptron)
:Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.01)
learning rate
adapt: bool (default False)
type of learning
lr_inc: float (> 1, default 1.05)
Ratio to increase learning rate
lr_dec: float (< 1, default 0.7)
Ratio to decrease learning rate
max_perf_inc:float (> 1, default 1.04)
Maximum performance increase
rr float (defaults 0.0)
Regularization ratio
Must be between {0, 1}
"""
def __init__(self, net, input, target, lr=0.01, adapt=False,
lr_inc=1.05, lr_dec=0.7,
max_perf_inc=1.04, rr=.0):
super(TrainGDA, self).__init__(net, input, target, lr, adapt, rr)
self.lr_inc = lr_inc
self.lr_dec = lr_dec
self.max_perf_inc = max_perf_inc
self.err = []
def learn(self, net, grad):
# print 'GDA.learn'
if len(self.err) > 1:
f = self.err[-1] / self.err[-2]
if f > self.max_perf_inc:
self.lr *= self.lr_dec
elif f < 1:
self.lr *= self.lr_inc
super(TrainGDA, self).learn(net, grad)
return None
def error(self, *args, **kwargs):
e = super(TrainGDA, self).error(*args, **kwargs)
self.err.append(e)
return e
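# Illustrative adaptation step: with the defaults above, an epoch error
# ratio greater than max_perf_inc (e.g. 1.10 > 1.04) shrinks lr by
# lr_dec (x0.7), a ratio below 1 grows lr by lr_inc (x1.05), and ratios
# in between leave lr unchanged.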
class TrainGDX(TrainGDA, TrainGDM):
"""
Gradient descent with momentum backpropagation and adaptive lr
:Support networks:
newff (multi-layers perceptron)
    :Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.01)
learning rate
adapt: bool (default False)
type of learning
lr_inc: float (default 1.05)
Ratio to increase learning rate
lr_dec: float (default 0.7)
Ratio to decrease learning rate
max_perf_inc:float (default 1.04)
Maximum performance increase
mc: float (default 0.9)
Momentum constant
rr float (defaults 0.0)
Regularization ratio
Must be between {0, 1}
"""
def __init__(self, net, input, target, lr=0.01, adapt=False, lr_inc=1.05,
lr_dec=0.7, max_perf_inc=1.04,
mc=0.9, rr=.0):
super(TrainGDX, self).__init__(net, input, target, lr, adapt, lr_inc,
lr_dec, max_perf_inc, rr)
self.mc = mc
class TrainRprop(TrainGD2):
"""
Resilient Backpropagation
:Support networks:
newff (multi-layers perceptron)
:Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.07)
learning rate (init rate)
adapt bool (default False)
type of learning
rate_dec: float (default 0.5)
Decrement to weight change
rate_inc: float (default 1.2)
Increment to weight change
rate_min: float (default 1e-9)
Minimum performance gradient
rate_max: float (default 50)
Maximum weight change
"""
def __init__(self, net, input, target, lr=0.07, adapt=False,
rate_dec=0.5, rate_inc=1.2, rate_min=1e-9, rate_max=50):
super(TrainRprop, self).__init__(net, input, target, lr, adapt)
self.rate_inc = rate_inc
self.rate_dec = rate_dec
self.rate_max = rate_max
self.rate_min = rate_min
size = tool.np_size(net)
self.grad_prev = np.zeros(size)
self.rate = np.zeros(size) + lr
def learn(self, net, grad):
prod = grad * self.grad_prev
# Sign not change
ind = prod > 0
self.rate[ind] *= self.rate_inc
# Sign change
ind = prod < 0
self.rate[ind] *= self.rate_dec
self.rate[self.rate > self.rate_max] = self.rate_max
self.rate[self.rate < self.rate_min] = self.rate_min
self.x -= self.rate * np.sign(grad)
self.grad_prev = grad
return None
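# Worked example of the step above (illustrative numbers): with lr=0.07
# and rate_inc=1.2, a weight whose gradient keeps its sign gets its step
# grown 0.07 -> 0.084 and is then moved by -0.084 * sign(grad),
# independent of the gradient magnitude.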
class TrainRpropM(TrainRprop):
"""
    Resilient Backpropagation Modified
    (with back-step when the gradient changes sign)
:Support networks:
newff (multi-layers perceptron)
:Parameters:
input: array like (l x net.ci)
train input patterns
target: array like (l x net.co)
train target patterns
epochs: int (default 500)
Number of train epochs
show: int (default 100)
Print period
goal: float (default 0.01)
The goal of train
lr: float (defaults 0.07)
learning rate (init rate)
adapt bool (default False)
type of learning
rate_dec: float (default 0.5)
Decrement to weight change
rate_inc: float (default 1.2)
Increment to weight change
rate_min: float (default 1e-9)
Minimum performance gradient
rate_max: float (default 50)
Maximum weight change
"""
def learn(self, net, grad):
prod = grad * self.grad_prev
# Sign not change
ind = prod > 0
self.rate[ind] *= self.rate_inc
# Sign change
ind = prod < 0
# Back step
self.x[ind] -= self.rate[ind] * np.sign(grad[ind])
grad[ind] *= -1
self.rate[ind] *= self.rate_dec
self.rate[self.rate > self.rate_max] = self.rate_max
self.rate[self.rate < self.rate_min] = self.rate_min
self.x -= self.rate * np.sign(grad)
self.grad_prev = grad
return None
| gpl-2.0 | 8,233,483,902,903,956,000 | 28.661578 | 77 | 0.529982 | false |
Houzz/luigi | test/contrib/salesforce_test.py | 9 | 3894 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Simply Measured
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""
Unit test for the Salesforce contrib package
"""
from luigi.contrib.salesforce import SalesforceAPI, QuerySalesforce
from helpers import unittest
import mock
from luigi.mock import MockTarget
from luigi.six import PY3
import re
# This method will be used by the mock to replace requests.get
def mocked_requests_get(*args, **kwargs):
class MockResponse:
def __init__(self, body, status_code):
self.body = body
self.status_code = status_code
@property
def text(self):
return self.body
def raise_for_status(self):
return None
result_list = (
'<result-list xmlns="http://www.force.com/2009/06/asyncapi/dataload">'
'<result>1234</result><result>1235</result><result>1236</result>'
'</result-list>'
)
return MockResponse(result_list, 200)
# Keep open around so we can use it in the mock responses
old__open = open
def mocked_open(*args, **kwargs):
if re.match("job_data", str(args[0])):
return MockTarget(args[0]).open(args[1])
else:
return old__open(*args)
class TestSalesforceAPI(unittest.TestCase):
# We patch 'requests.get' with our own method. The mock object is passed in to our test case method.
@mock.patch('requests.get', side_effect=mocked_requests_get)
def test_deprecated_results_warning(self, mock_get):
sf = SalesforceAPI('xx', 'xx', 'xx')
if PY3:
with self.assertWarnsRegex(UserWarning, r'get_batch_results is deprecated'):
result_id = sf.get_batch_results('job_id', 'batch_id')
else:
result_id = sf.get_batch_results('job_id', 'batch_id')
self.assertEqual('1234', result_id)
@mock.patch('requests.get', side_effect=mocked_requests_get)
def test_result_ids(self, mock_get):
sf = SalesforceAPI('xx', 'xx', 'xx')
result_ids = sf.get_batch_result_ids('job_id', 'batch_id')
self.assertEqual(['1234', '1235', '1236'], result_ids)
class TestQuerySalesforce(QuerySalesforce):
def output(self):
return MockTarget('job_data.csv')
@property
def object_name(self):
return 'dual'
@property
def soql(self):
return "SELECT * FROM %s" % self.object_name
class TestSalesforceQuery(unittest.TestCase):
patch_name = '__builtin__.open'
if PY3:
patch_name = 'builtins.open'
@mock.patch(patch_name, side_effect=mocked_open)
def setUp(self, mock_open):
MockTarget.fs.clear()
self.result_ids = ['a', 'b', 'c']
counter = 1
self.all_lines = "Lines\n"
self.header = "Lines"
for i, id in enumerate(self.result_ids):
filename = "%s.%d" % ('job_data.csv', i)
with MockTarget(filename).open('w') as f:
line = "%d line\n%d line" % ((counter), (counter+1))
f.write(self.header + "\n" + line + "\n")
self.all_lines += line+"\n"
counter += 2
@mock.patch(patch_name, side_effect=mocked_open)
def test_multi_csv_download(self, mock_open):
qsf = TestQuerySalesforce()
qsf.merge_batch_results(self.result_ids)
self.assertEqual(MockTarget(qsf.output().path).open('r').read(), self.all_lines)
| apache-2.0 | 1,970,273,524,199,984,600 | 30.918033 | 104 | 0.632768 | false |
mpattyn/fumiste | prototypePython/steamapi/requests/__init__.py | 1 | 1856 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2014 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.1.0'
__build__ = 0x020100
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| mit | 7,162,068,231,538,493,000 | 23.103896 | 77 | 0.631466 | false |
NewCaliforniaWaterAtlas/ca-freshwater-species-backend-rest-framework | api/settings.py | 1 | 2252 | """
Django settings for api project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%64jrs=yt4iop+hnywkews%e^%xcnj&^&y(07!w@zoz5w-s1r9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'api',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'api.urls'
WSGI_APPLICATION = 'api.wsgi.application'
CORS_ORIGIN_ALLOW_ALL = True
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
POSTGIS_VERSION = (2, 1, 0)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'HOST': 'localhost',
'NAME': os.environ['DB'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
}
}
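# Deployment sketch (an assumption, not part of the upstream settings): the
# three environment variables read above must be exported before Django
# starts, e.g.
#   export DB=atlas DB_USER=atlas_user DB_PASSWORD=secret
#   python manage.py runserver
# os.environ['DB'] raises KeyError when unset; a more forgiving variant is
# os.environ.get('DB', 'atlas') with a local default.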
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Static asset configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| mit | 3,608,196,549,639,135,700 | 22.458333 | 78 | 0.700266 | false |
bdyetton/prettychart | website/models.py | 5 | 1195 | # -*- coding: utf-8 -*-
"""Consolidates all necessary models from the framework and website packages.
"""
from framework.auth.core import User
from framework.guid.model import Guid, BlacklistGuid
from framework.sessions.model import Session
from website.project.model import (
Node, NodeLog,
Tag, WatchConfig, MetaSchema, Pointer,
Comment, PrivateLink, MetaData, Retraction,
Embargo,
)
from website.oauth.models import ExternalAccount
from website.identifiers.model import Identifier
from website.citations.models import CitationStyle
from website.conferences.model import Conference, MailRecord
from website.notifications.model import NotificationDigest
from website.notifications.model import NotificationSubscription
from website.archiver.model import ArchiveJob, ArchiveTarget
# All models
MODELS = (
User, Node, NodeLog,
Tag, WatchConfig, Session, Guid, MetaSchema, Pointer,
MailRecord, Comment, PrivateLink, MetaData, Conference,
NotificationSubscription, NotificationDigest, CitationStyle,
CitationStyle, ExternalAccount, Identifier, Retraction,
Embargo, ArchiveJob, ArchiveTarget, BlacklistGuid
)
GUID_MODELS = (User, Node, Comment, MetaData)
| apache-2.0 | 2,030,941,442,163,092,500 | 35.212121 | 77 | 0.794142 | false |
Cal-CS-61A-Staff/ok | server/jobs/__init__.py | 1 | 3089 | import functools
import io
import logging
from flask_login import current_user
from flask_rq import get_connection, get_queue
import redis.exceptions
import rq
from server.constants import APPLICATION_ROOT
from server.models import db, Job
class JobLogHandler(logging.StreamHandler):
"""Stream log contents to buffer and to DB. """
def __init__(self, stream, job, log_every=10):
super().__init__(stream)
self.stream = stream
self.job = job
self.counter = 0
self.log_every = log_every
def handle(self, record):
self.counter += 1
super().handle(record)
print(record.message)
if (self.counter % self.log_every) == 0:
self.job.log = self.contents
db.session.commit()
@property
def contents(self):
return self.stream.getvalue()
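# Minimal usage sketch for JobLogHandler (illustrative; `job` is assumed to
# be a server.models.Job row already persisted to the session):
#
#   stream = io.StringIO()
#   log = logging.getLogger('example')
#   log.addHandler(JobLogHandler(stream, job, log_every=5))
#   log.warning('step 1 done')  # buffered now, flushed to job.log on every 5th record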
def get_current_job():
rq_job = rq.get_current_job(connection=get_connection())
return Job.query.get(rq_job.id)
def get_job_logger():
return logging.getLogger('{}.job_{}'.format(__name__, get_current_job().id))
def background_job(f):
@functools.wraps(f)
def job_handler(*args, **kwargs):
job = get_current_job()
job.status = 'running'
db.session.commit()
stream = io.StringIO()
handler = JobLogHandler(stream, job)
logger = get_job_logger()
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return_value = None
try:
return_value = f(*args, **kwargs)
except Exception:
job.failed = True
logger.exception('Job failed')
job.status = 'finished'
job.result = _format_result(job, return_value)
job.log = handler.contents
stream.close()
db.session.commit()
return job_handler
def _format_result(job, result):
if job.result_kind == 'link':
if result and not result.startswith(APPLICATION_ROOT):
result = APPLICATION_ROOT + result
return result
def enqueue_job(func, *args,
description=None, course_id=None, user_id=None, timeout=300,
result_kind='string', **kwargs):
if not description:
raise ValueError('Description required to start background job')
if not course_id:
raise ValueError('Course ID required to start background job')
if not user_id:
user_id = current_user.id
job = Job(
status='queued',
course_id=course_id,
user_id=user_id,
name=func.__name__,
description=description,
result_kind=result_kind
)
db.session.add(job)
db.session.commit()
try:
get_queue().enqueue_call(
func=func,
args=args,
kwargs=kwargs,
job_id=str(job.id),
timeout=timeout
)
except redis.exceptions.ConnectionError as e:
job.failed = True
job.status = 'finished'
job.log = 'Could not connect to Redis: ' + str(e)
db.session.add(job)
db.session.commit()
return job
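# Usage sketch tying background_job and enqueue_job together (the function
# name and arguments below are hypothetical, not part of this module):
#
#   @background_job
#   def grade_all(assignment_id):
#       get_job_logger().info('grading %s', assignment_id)
#       return '/admin/grading/%s' % assignment_id  # stored via _format_result
#
#   job = enqueue_job(grade_all, assignment_id=42,
#                     description='Grade everything', course_id=1,
#                     result_kind='link')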
| apache-2.0 | 5,238,284,720,155,805,000 | 26.096491 | 80 | 0.597604 | false |
ykaneko/neutron | neutron/tests/base.py | 4 | 2827 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Test Case for all Unit Tests"""
import logging
import os
import fixtures
from oslo.config import cfg
import stubout
import testtools
from neutron.openstack.common import exception
CONF = cfg.CONF
TRUE_STRING = ['True', '1']
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
if os.environ.get('OS_DEBUG') in TRUE_STRING:
_level = logging.DEBUG
else:
_level = logging.INFO
self.useFixture(fixtures.FakeLogger(format=LOG_FORMAT, level=_level))
test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(CONF.reset)
if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_STRING:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in TRUE_STRING:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
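        # Usage sketch (the option names here are hypothetical):
        #   self.config(core_plugin='fake.Plugin')
        #   self.config(report_interval=10, group='AGENT')
        # Both overrides are rolled back automatically by the fixtures
        # cleanup when the test finishes.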
| apache-2.0 | -7,705,615,731,703,952,000 | 33.901235 | 77 | 0.678458 | false |
HybridF5/nova | nova/api/openstack/compute/assisted_volume_snapshots.py | 33 | 3881 | # Copyright 2013 Red Hat, Inc.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Assisted volume snapshots extension."""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from webob import exc
from nova.api.openstack.compute.schemas import assisted_volume_snapshots
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
ALIAS = 'os-assisted-volume-snapshots'
authorize = extensions.os_compute_authorizer(ALIAS)
class AssistedVolumeSnapshotsController(wsgi.Controller):
"""The Assisted volume snapshots API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
super(AssistedVolumeSnapshotsController, self).__init__()
@extensions.expected_errors(400)
@validation.schema(assisted_volume_snapshots.snapshots_create)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context, action='create')
snapshot = body['snapshot']
create_info = snapshot['create_info']
volume_id = snapshot['volume_id']
LOG.info(_LI("Create assisted snapshot from volume %s"), volume_id,
context=context)
try:
return self.compute_api.volume_snapshot_create(context, volume_id,
create_info)
except (exception.VolumeBDMNotFound,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
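    # Request sketch for this extension (the volume_id/create_info envelope
    # matches the handler above; the inner create_info keys are an assumed
    # example -- see the schema module for the authoritative shape):
    #
    #   POST /os-assisted-volume-snapshots
    #   {"snapshot": {"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
    #                 "create_info": {"snapshot_id": "421752a6-...",
    #                                 "type": "qcow2",
    #                                 "new_file": "new_file_name"}}}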
@wsgi.response(204)
@extensions.expected_errors((400, 404))
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context, action='delete')
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
delete_metadata = {}
delete_metadata.update(req.GET)
try:
delete_info = jsonutils.loads(delete_metadata['delete_info'])
volume_id = delete_info['volume_id']
except (KeyError, ValueError) as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
try:
self.compute_api.volume_snapshot_delete(context, volume_id,
id, delete_info)
except (exception.VolumeBDMNotFound,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except exception.NotFound as e:
return exc.HTTPNotFound(explanation=e.format_message())
class AssistedVolumeSnapshots(extensions.V21APIExtensionBase):
"""Assisted volume snapshots."""
name = "AssistedVolumeSnapshots"
alias = ALIAS
version = 1
def get_resources(self):
res = [extensions.ResourceExtension(ALIAS,
AssistedVolumeSnapshotsController())]
return res
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
| apache-2.0 | -9,150,551,308,493,265,000 | 34.605505 | 78 | 0.662716 | false |
chromium2014/src | tools/telemetry/telemetry/web_perf/metrics/responsiveness_metric.py | 9 | 2009 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.web_perf import timeline_interaction_record as tir_module
from telemetry.web_perf.metrics import mainthread_jank_stats
from telemetry.web_perf.metrics import timeline_based_metric
class ResponsivenessMetric(timeline_based_metric.TimelineBasedMetric):
"""Computes metrics that measure respsonsiveness on the record ranges.
total_big_jank_thread_time is the total thread duration of all top
slices whose thread time ranges overlapped with any thread time ranges of
the records and the overlapped thread duration is greater than or equal
USER_PERCEIVABLE_DELAY_THRESHOLD_MS.
biggest_jank_thread_time is the biggest thread duration of all
top slices whose thread time ranges overlapped with any of records' thread
time ranges.
All *_time values are measured in milliseconds.
"""
def __init__(self):
super(ResponsivenessMetric, self).__init__()
def AddResults(self, _, renderer_thread, interaction_records, results):
self.VerifyNonOverlappedRecords(interaction_records)
try:
jank_stats = mainthread_jank_stats.MainthreadJankStats(
renderer_thread, interaction_records)
# TODO(nednguyen): maybe fall back to use wall-time for computing the
# metrics.
except tir_module.NoThreadTimeDataException as e:
#TODO(nednguyen): Report the warning with page_results system.
logging.warning(
'Main thread jank metrics cannot be computed for records %s since '
'trace does not contain thread time data. %s',
repr(interaction_records), repr(e))
return
results.Add('responsive-total_big_jank_thread_time', 'ms',
jank_stats.total_big_jank_thread_time)
results.Add('responsive-biggest_jank_thread_time', 'ms',
jank_stats.biggest_jank_thread_time)
| bsd-3-clause | 2,616,633,755,200,080,400 | 41.744681 | 80 | 0.72673 | false |
openstack/sahara-tests | sahara_tests/unit/scenario/test_base.py | 1 | 32865 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from saharaclient.api import cluster_templates
from saharaclient.api import clusters
from saharaclient.api import data_sources
from saharaclient.api import images
from saharaclient.api import job_binaries
from saharaclient.api import job_binary_internals
from saharaclient.api import job_executions
from saharaclient.api import jobs
from saharaclient.api import node_group_templates
from saharaclient.api import plugins
from tempest.lib import exceptions as exc
import testtools
from sahara_tests.scenario import base
from sahara_tests.scenario import timeouts
class FakeSaharaClient(object):
def __init__(self):
self.clusters = clusters.ClusterManager(None)
self.cluster_templates = cluster_templates.ClusterTemplateManager(None)
self.node_group_templates = (node_group_templates.
NodeGroupTemplateManager(None))
self.plugins = plugins.PluginManager(None)
self.images = images.ImageManager(None)
self.data_sources = data_sources.DataSourceManager(None)
self.jobs = jobs.JobsManager(None)
self.job_executions = job_executions.JobExecutionsManager(None)
self.job_binaries = job_binaries.JobBinariesManager(None)
self.job_binary_internals = (
job_binary_internals.JobBinaryInternalsManager(None))
class FakeCluster(object):
def __init__(self, is_transient=False, provision_progress=[], ng=[]):
self.is_transient = is_transient
self.provision_progress = provision_progress
self.node_groups = ng
class FakeResponse(object):
def __init__(self, set_id=None, set_status=None, status_description=None,
node_groups=None, url=None, job_id=None, name=None,
job_type=None, verification=None):
self.id = set_id
self.status = set_status
self.status_description = status_description
self.node_groups = node_groups
self.url = url
self.job_id = job_id
self.name = name
self.type = job_type
self.verification = verification
class FakeFlavor(object):
def __init__(self, flavor_id=None, name=None):
self.id = flavor_id
self.name = name
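# The Fake* stand-ins above let the tests patch saharaclient/novaclient
# calls without touching real services, e.g. (the pattern used throughout
# the test methods below):
#   mock.patch('saharaclient.api.base.ResourceManager._get',
#              return_value=FakeResponse(set_status='Active'))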
class TestBase(testtools.TestCase):
def setUp(self):
super(TestBase, self).setUp()
with mock.patch(
'sahara_tests.scenario.base.BaseTestCase.__init__'
) as mock_init:
mock_init.return_value = None
self.base_scenario = base.BaseTestCase()
self.base_scenario.credentials = {'os_username': 'admin',
'os_password': 'nova',
'os_tenant': 'admin',
'os_auth_url':
'http://localhost:5000/v2.0',
's3_accesskey': 'very_long_key',
's3_secretkey': 'very_long_secret',
's3_endpoint': 'https://localhost',
'sahara_service_type':
'data-processing-local',
'sahara_url':
'http://sahara_host:8386/v1.1',
'ssl_cert': 'sahara_tests/unit/'
'scenario/dummy.crt',
'ssl_verify': True}
self.base_scenario.plugin_opts = {'plugin_name': 'vanilla',
'hadoop_version': '2.7.1'}
self.base_scenario.network = {'private_network': 'changed_private',
'public_network': 'changed_public',
'auto_assignment_floating_ip': False}
self.base_scenario.testcase = {
'node_group_templates': [
{
'name': 'master',
'node_processes': ['namenode', 'oozie', 'resourcemanager'],
'flavor': '2',
'is_proxy_gateway': True
},
{
'name': 'worker',
'node_processes': ['datanode', 'nodemanager'],
'flavor': '2'
}],
'cluster_template': {
'name': 'test_name_ct',
'node_group_templates': {
'master': 1,
'worker': 3
}
},
'timeout_poll_cluster_status': 300,
'timeout_delete_resource': 300,
'timeout_poll_jobs_status': 2,
'timeout_check_transient': 3,
'retain_resources': True,
'image': 'image_name',
'edp_batching': 1,
"edp_jobs_flow": {
"test_flow": [
{
"type": "Pig",
"input_datasource": {
"type": "swift",
"source": "sahara_tests/scenario/defaults/"
"edp-examples/edp-pig/"
"top-todoers/data/input"
},
"output_datasource": {
"type": "hdfs",
"destination": "/user/hadoop/edp-output"
},
"main_lib": {
"type": "s3",
"source": "sahara_tests/scenario/defaults/"
"edp-examples/edp-pig/"
"top-todoers/example.pig"
}
}
]
}
}
self.base_scenario.ng_id_map = {'worker': 'set_id', 'master': 'set_id'}
self.base_scenario.ng_name_map = {}
self.base_scenario.key_name = 'test_key'
self.base_scenario.key = 'key_from_yaml'
self.base_scenario.template_path = ('sahara_tests/scenario/templates/'
'vanilla/2.7.1')
self.job = self.base_scenario.testcase["edp_jobs_flow"].get(
'test_flow')[0]
self.base_scenario.cluster_id = 'some_id'
self.base_scenario.proxy_ng_name = False
self.base_scenario.proxy = False
self.base_scenario.setUpClass()
timeouts.Defaults.init_defaults(self.base_scenario.testcase)
@mock.patch('keystoneauth1.identity.v3.Password')
@mock.patch('keystoneauth1.session.Session')
@mock.patch('glanceclient.client.Client', return_value=None)
@mock.patch('saharaclient.client.Client', return_value=None)
@mock.patch('novaclient.client.Client', return_value=None)
@mock.patch('neutronclient.neutron.client.Client', return_value=None)
@mock.patch('swiftclient.client.Connection', return_value=None)
def test__init_clients(self, swift, neutron, nova, sahara, glance,
m_session, m_auth):
fake_session = mock.Mock()
fake_auth = mock.Mock()
m_session.return_value = fake_session
m_auth.return_value = fake_auth
self.base_scenario._init_clients()
sahara.assert_called_with('1.1',
session=fake_session,
service_type='data-processing-local',
sahara_url='http://sahara_host:8386/v1.1')
swift.assert_called_with(
auth_version='2.0', user='admin', key='nova', insecure=False,
cacert='sahara_tests/unit/scenario/dummy.crt',
tenant_name='admin', authurl='http://localhost:5000/v2.0')
nova.assert_called_with('2', session=fake_session)
neutron.assert_called_with('2.0', session=fake_session)
glance.assert_called_with('2', session=fake_session)
m_auth.assert_called_with(auth_url='http://localhost:5000/v3',
username='admin',
password='nova',
project_name='admin',
user_domain_name='default',
project_domain_name='default')
m_session.assert_called_with(
auth=fake_auth,
cert='sahara_tests/unit/scenario/dummy.crt', verify=True)
@mock.patch('neutronclient.v2_0.client.Client.list_networks',
return_value={'networks': [{'id': '2314'}]})
@mock.patch('saharaclient.api.node_group_templates.'
'NodeGroupTemplateManager.create',
return_value=FakeResponse(set_id='id_ng'))
def test__create_node_group_template(self, mock_del, mock_saharaclient):
self.base_scenario._init_clients()
self.assertEqual({'worker': 'id_ng', 'master': 'id_ng'},
self.base_scenario._create_node_group_templates())
@mock.patch('neutronclient.v2_0.client.Client.list_networks',
return_value={'networks': [{'id': '2314'}]})
def test__create_node_group_template_bootfromvolume_apiv1(self, mock_del):
self.base_scenario._init_clients()
self.base_scenario.use_api_v2 = False
for ng in self.base_scenario.testcase['node_group_templates']:
ng['boot_from_volume'] = True
with self.assertRaisesRegex(Exception, "^boot_from_volume is.*"):
self.base_scenario._create_node_group_templates()
@mock.patch('saharaclient.api.node_group_templates.'
'NodeGroupTemplateManager.create',
return_value=FakeResponse(set_id='id_ng'))
@mock.patch('neutronclient.v2_0.client.Client.list_networks',
return_value={'networks': [
{'id': '342'}
]})
@mock.patch('neutronclient.v2_0.client.Client.create_security_group',
return_value={'security_group': {'id': '213'}})
@mock.patch('sahara_tests.scenario.clients.NeutronClient'
'.add_security_group_rule_for_neutron',
return_value='sg_name')
@mock.patch('sahara_tests.scenario.clients.NeutronClient'
'.delete_security_group_for_neutron',
return_value=None)
def test__create_security_group_uuid(self, mock_del, mock_add_rule,
mock_sg, mock_neutron, mock_ng):
self.base_scenario.network['public_network'] = (
'692dcc5b-1205-4645-8a12-2558579ed17e')
self.base_scenario._init_clients()
for ng in self.base_scenario.testcase['node_group_templates']:
ng['auto_security_group'] = False
self.assertEqual({'master': 'id_ng', 'worker': 'id_ng'},
self.base_scenario._create_node_group_templates())
@mock.patch('saharaclient.api.node_group_templates.'
'NodeGroupTemplateManager.create',
return_value=FakeResponse(set_id='id_ng'))
@mock.patch('neutronclient.v2_0.client.Client.list_networks',
return_value={'networks': [
{'id': '342'}
]})
@mock.patch('neutronclient.v2_0.client.Client.create_security_group',
return_value={'security_group': {'id': '213'}})
@mock.patch('sahara_tests.scenario.clients.NeutronClient'
'.create_security_group_for_neutron',
return_value='sg_name')
@mock.patch('neutronclient.v2_0.client.Client.create_security_group_rule',
return_value=None)
@mock.patch('neutronclient.v2_0.client.Client.delete_security_group',
return_value=None)
def test__create_security_group(self, mock_del, mock_create, mock_sg,
mock_sgn, mock_list, mock_ng):
self.base_scenario._init_clients()
for ng in self.base_scenario.testcase['node_group_templates']:
ng['auto_security_group'] = False
self.assertEqual({'master': 'id_ng', 'worker': 'id_ng'},
self.base_scenario._create_node_group_templates())
@mock.patch('sahara_tests.scenario.clients.NeutronClient.get_network_id',
return_value='mock_net')
@mock.patch('saharaclient.api.cluster_templates.'
'ClusterTemplateManager.create',
return_value=FakeResponse(set_id='id_ct'))
def test__create_cluster_template(self, mock_ct, mock_neutron):
self.base_scenario._init_clients()
self.assertEqual('id_ct',
self.base_scenario._create_cluster_template())
@mock.patch('saharaclient.api.images.ImageManager.get',
return_value=FakeResponse(set_id='image'))
@mock.patch('sahara_tests.scenario.clients.GlanceClient.get_image_id',
return_value='mock_image')
@mock.patch('saharaclient.api.clusters.ClusterManager.create',
return_value=FakeResponse(set_id='id_cluster'))
def test__create_cluster(self, mock_cluster_manager, mock_glance,
mock_image):
self.base_scenario._init_clients()
self.assertEqual('id_cluster',
self.base_scenario._create_cluster('id_ct'))
@mock.patch('sahara_tests.scenario.clients.NeutronClient.get_network_id',
return_value='mock_net')
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(
set_status=base.CLUSTER_STATUS_ACTIVE))
def test__poll_cluster_status(self, mock_status, mock_neutron):
self.base_scenario._init_clients()
self.assertIsNone(
self.base_scenario._poll_cluster_status('id_cluster'))
@mock.patch('saharaclient.api.base.ResourceManager._get')
def test_check_event_log_feature(self, mock_resp):
self.base_scenario._init_clients()
self.assertIsNone(self.base_scenario._check_event_logs(
FakeCluster(True, [])))
self.assertIsNone(self.base_scenario._check_event_logs(
FakeCluster(False, [{'successful': True}])))
with testtools.ExpectedException(exc.TempestException):
self.base_scenario._check_event_logs(
FakeCluster(False, [{'successful': False}]))
with testtools.ExpectedException(exc.TempestException):
self.base_scenario._check_event_logs(
FakeCluster(False, [{'successful': None}]))
@mock.patch('saharaclient.api.base.ResourceManager._update',
return_value=FakeResponse(set_id='id_internal_db_data'))
def test__create_internal_db_data(self, mock_update):
self.base_scenario._init_clients()
self.assertEqual('internal-db://id_internal_db_data',
self.base_scenario._create_internal_db_data(
'sahara_tests/unit/scenario/vanilla2_7_1.yaml'))
@mock.patch('swiftclient.client.Connection.put_container',
return_value=None)
def test__create_swift_data(self, mock_swiftclient):
self.base_scenario._init_clients()
self.assertIn('swift://sahara-tests-',
self.base_scenario._create_swift_data())
@mock.patch('swiftclient.client.Connection.put_container',
return_value=None)
def test__get_swift_container(self, mock_swiftclient):
self.base_scenario._init_clients()
self.assertIn('sahara-tests-',
self.base_scenario._get_swift_container())
@mock.patch('saharaclient.api.base.ResourceManager._create',
return_value=FakeResponse(set_id='id_for_datasource'))
@mock.patch('swiftclient.client.Connection.put_container',
return_value=None)
@mock.patch('swiftclient.client.Connection.put_object',
return_value=None)
def test__create_datasources(self, mock_swiftcontainer, mock_swiftobject,
mock_create):
self.base_scenario._init_clients()
self.assertEqual(('id_for_datasource', 'id_for_datasource'),
self.base_scenario._create_datasources(
self.job))
@mock.patch('saharaclient.api.base.ResourceManager._create',
return_value=FakeResponse(set_id='id_for_job_binaries'))
@mock.patch('sahara_tests.scenario.clients.BotoClient.upload_data',
return_value={})
@mock.patch('sahara_tests.scenario.clients.BotoClient.create_bucket',
return_value={'Location': 'foo'})
@mock.patch('swiftclient.client.Connection.put_object',
return_value=None)
@mock.patch('swiftclient.client.Connection.put_container',
return_value=None)
def test__create_create_job_binaries(self, mock_swiftcontainer,
mock_swiftobject,
mock_create_bucket,
mock_upload_bucket_data,
mock_sahara_create):
self.base_scenario._init_clients()
self.assertEqual((['id_for_job_binaries'], []),
self.base_scenario._create_job_binaries(
self.job))
@mock.patch('saharaclient.api.base.ResourceManager._create',
return_value=FakeResponse(set_id='id_for_job_binary'))
@mock.patch('sahara_tests.scenario.clients.BotoClient.create_bucket',
return_value={'Location': 'foo'})
@mock.patch('swiftclient.client.Connection.put_object',
return_value=None)
@mock.patch('swiftclient.client.Connection.put_container',
return_value=None)
@mock.patch('saharaclient.client.Client', return_value=FakeSaharaClient())
def test__create_create_job_binary(self, mock_saharaclient,
mock_swiftcontainer, mock_swiftobject,
mock_create_bucket, mock_sahara_create):
self.base_scenario._init_clients()
self.assertEqual('id_for_job_binary',
self.base_scenario._create_job_binary(self.job.get(
'input_datasource')))
@mock.patch('saharaclient.api.base.ResourceManager._create',
return_value=FakeResponse(set_id='id_for_job'))
def test__create_job(self, mock_client):
self.base_scenario._init_clients()
self.assertEqual('id_for_job',
self.base_scenario._create_job(
'Pig',
['id_for_job_binaries'],
[]))
@mock.patch('sahara_tests.scenario.clients.SaharaClient.get_cluster_id',
return_value='cluster_id')
@mock.patch('sahara_tests.scenario.clients.SaharaClient.get_cluster',
return_value=FakeCluster(ng=[]))
@mock.patch('sahara_tests.scenario.base.BaseTestCase.check_cinder',
return_value=None)
@mock.patch('sahara_tests.scenario.clients.SaharaClient.get_job_status',
return_value='KILLED')
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(set_id='id_for_run_job_get',
job_type='Java',
name='test_job'))
@mock.patch('saharaclient.api.base.ResourceManager._create',
return_value=FakeResponse(set_id='id_for_run_job_create'))
@mock.patch('sahara_tests.scenario.base.BaseTestCase.'
'_poll_cluster_status',
return_value=None)
@mock.patch('sahara_tests.scenario.base.BaseTestCase.'
'_create_node_group_templates',
return_value='id_node_group_template')
@mock.patch('sahara_tests.scenario.base.BaseTestCase.'
'_create_cluster_template',
return_value='id_cluster_template')
@mock.patch('sahara_tests.scenario.base.BaseTestCase._create_cluster',
return_value='id_cluster')
@mock.patch('sahara_tests.scenario.base.BaseTestCase._create_job',
return_value='id_for_job')
@mock.patch('sahara_tests.scenario.base.BaseTestCase._create_job_binaries',
return_value=(['id_for_job_binaries'], []))
@mock.patch('sahara_tests.scenario.base.BaseTestCase._create_datasources',
return_value=('id_for_datasource', 'id_for_datasource'))
@mock.patch('sahara_tests.scenario.base.BaseTestCase.check_verification')
def test_check_run_jobs(self, mock_verification, mock_datasources,
mock_job_binaries, mock_job,
mock_node_group_template, mock_cluster_template,
mock_cluster, mock_cluster_status, mock_create,
mock_get, mock_client, mock_cinder, mock_get_cl,
mock_get_cluster_id):
self.base_scenario._init_clients()
self.base_scenario.create_cluster()
self.base_scenario.testcase["edp_jobs_flow"] = [
{
"type": "Pig",
"input_datasource": {
"type": "s3",
"source": "sahara_tests/scenario/defaults/edp-examples/"
"edp-pig/top-todoers/"
"data/input"
},
"output_datasource": {
"type": "hdfs",
"destination": "/user/hadoop/edp-output"
},
"main_lib": {
"type": "swift",
"source": "sahara_tests/scenario/defaults/edp-examples/"
"edp-pig/top-todoers/"
"example.pig"
}
}
]
with mock.patch('time.sleep'):
self.assertIsNone(self.base_scenario.check_run_jobs())
self.assertIn("Job with id=id_for_run_job_create, name=test_job, "
"type=Java has status KILLED",
self.base_scenario._results[-1]['traceback'][-1])
@mock.patch('sahara_tests.scenario.base.BaseTestCase._poll_cluster_status',
return_value=None)
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(set_id='id_scale_get'))
@mock.patch('saharaclient.api.base.ResourceManager._update',
return_value=FakeResponse(set_id='id_scale_update'))
def test_check_scale(self, mock_update, mock_get, mock_poll):
self.base_scenario._init_clients()
self.base_scenario.ng_id_map = {'vanilla-worker': 'set_id-w',
'vanilla-master': 'set_id-m'}
self.base_scenario.ng_name_map = {'vanilla-worker': 'worker-123',
'vanilla-master': 'master-321'}
self.base_scenario.cluster_id = 'cluster_id'
self.assertIsNone(self.base_scenario.check_scale())
@mock.patch('sahara_tests.scenario.clients.NeutronClient.get_network_id',
return_value='mock_net')
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(set_status='Error',
status_description=""))
def test_errormsg(self, mock_status, mock_neutron):
self.base_scenario._init_clients()
with testtools.ExpectedException(exc.TempestException):
self.base_scenario._poll_cluster_status('id_cluster')
def test_get_nodes_with_process(self):
self.base_scenario._init_clients()
with mock.patch(
'sahara_tests.scenario.clients.SaharaClient.get_cluster',
return_value=FakeResponse(node_groups=[
{
'node_processes': ['test'],
'instances': ['test_instance']
}
])):
self.assertEqual(
['test_instance'],
self.base_scenario._get_nodes_with_process('test')
)
with mock.patch(
'sahara_tests.scenario.clients.SaharaClient.get_cluster',
return_value=FakeResponse(node_groups=[
{
'node_processes': 'test',
'instances': []
}
])):
self.assertEqual(
[], self.base_scenario._get_nodes_with_process('test'))
def test_get_node_list_with_volumes(self):
self.base_scenario._init_clients()
with mock.patch(
'sahara_tests.scenario.clients.SaharaClient.get_cluster',
return_value=FakeResponse(node_groups=[
{
'node_processes': 'test',
'volumes_per_node': 2,
'volume_mount_prefix': 2,
'instances': [
{
'management_ip': 'test_ip'
}
]
}
])):
self.assertEqual(
[{
'node_ip': 'test_ip',
'volume_count': 2,
'volume_mount_prefix': 2
}], self.base_scenario._get_node_list_with_volumes())
@mock.patch('sahara_tests.scenario.clients.SaharaClient.get_datasource')
def test_put_io_data_to_configs(self, get_datasources):
self.base_scenario._init_clients()
get_datasources.side_effect = [
mock.Mock(id='1', url="swift://cont/input"),
mock.Mock(id='2', url="hdfs://cont/output")
]
configs = {'args': ['2', "{input_datasource}",
"{output_datasource}"]}
self.assertEqual({'args': ['2', 'swift://cont/input',
'hdfs://cont/output']},
self.base_scenario._put_io_data_to_configs(
configs, '1', '2'))
@mock.patch('sahara_tests.scenario.base.BaseTestCase.addCleanup')
@mock.patch('novaclient.v2.flavors.FlavorManager.create',
return_value=FakeFlavor(flavor_id='created_flavor_id'))
def test_get_flavor_id_anonymous(self, mock_create_flavor, mock_base):
self.base_scenario._init_clients()
self.assertEqual('created_flavor_id',
self.base_scenario._get_flavor_id({
"id": 'created_flavor_id',
"vcpus": 1,
"ram": 512,
"root_disk": 1,
"ephemeral_disk": 1,
"swap_disk": 1
}))
@mock.patch('sahara_tests.scenario.base.BaseTestCase.addCleanup')
@mock.patch('novaclient.v2.flavors.FlavorManager.create',
return_value=FakeFlavor(flavor_id='created_flavor_id'))
@mock.patch('novaclient.v2.flavors.FlavorManager.list',
return_value=[FakeFlavor(flavor_id='existing_flavor_id',
name='test-flavor')])
def test_get_flavor_name_found(self, mock_list_flavor, mock_create_flavor,
mock_base):
self.base_scenario._init_clients()
self.assertEqual('existing_flavor_id',
self.base_scenario._get_flavor_id({
'name': 'test-flavor',
"id": 'created_flavor_id',
"vcpus": 1,
"ram": 512,
"root_disk": 1,
"ephemeral_disk": 1,
"swap_disk": 1
}))
@mock.patch('sahara_tests.scenario.base.BaseTestCase.addCleanup')
@mock.patch('novaclient.v2.flavors.FlavorManager.create',
return_value=FakeFlavor(flavor_id='created_flavor_id'))
@mock.patch('novaclient.v2.flavors.FlavorManager.list',
return_value=[FakeFlavor(flavor_id='another_flavor_id',
name='another-flavor')])
def test_get_flavor_id_not_found(self, mock_list_flavor,
mock_create_flavor, mock_base):
self.base_scenario._init_clients()
self.assertEqual('created_flavor_id',
self.base_scenario._get_flavor_id({
'name': 'test-flavor',
"id": 'created_flavor_id',
"vcpus": 1,
"ram": 512,
"root_disk": 1,
"ephemeral_disk": 1,
"swap_disk": 1
}))
@mock.patch('sahara_tests.scenario.base.BaseTestCase._run_command_on_node')
def test_create_hdfs_data(self, mock_ssh):
self.base_scenario._init_clients()
output_path = '/user/test/data/output'
self.assertEqual(output_path,
self.base_scenario._create_dfs_data(None, output_path,
None, 'hdfs'))
input_path = ('sahara_tests/scenario/defaults/edp-examples/edp-pig/'
'trim-spaces/data/input')
with mock.patch(
'sahara_tests.scenario.clients.SaharaClient.get_cluster',
return_value=FakeResponse(node_groups=[
{
'node_processes': ['master', 'namenode'],
'instances': [{
'management_ip': 'test_ip'
}]
}])):
self.assertIn('/user/test/data-', (
self.base_scenario._create_dfs_data(input_path, None,
'test', 'hdfs')))
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(
set_status=base.CLUSTER_STATUS_ACTIVE,
verification={'verification': {
'status': 'GREEN',
'cluster_id': 'id_cluster'
}}))
@mock.patch('saharaclient.api.clusters.ClusterManager.verification_update')
@mock.patch('sahara_tests.scenario.base.BaseTestCase.'
'check_feature_available', return_value=True)
def test_check_verification_did_not_start(self, mock_feature,
mock_verification,
mock_get_status):
self.base_scenario._init_clients()
self.assertIsNone(self.base_scenario.check_verification('id_cluster'))
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(
set_status=base.CLUSTER_STATUS_ACTIVE))
@mock.patch('saharaclient.api.clusters.ClusterManager.verification_update')
@mock.patch('sahara_tests.scenario.base.BaseTestCase.'
'check_feature_available', return_value=True)
@mock.patch('sahara_tests.scenario.base.BaseTestCase._get_health_status',
return_value='GREEN')
def test_verification_start(self, mock_status, mock_feature,
mock_verification, mock_get_status):
self.base_scenario._init_clients()
self.assertIsNone(self.base_scenario.check_verification('id_cluster'))
@mock.patch('saharaclient.api.base.ResourceManager._get',
return_value=FakeResponse(
set_status=base.CLUSTER_STATUS_ACTIVE))
@mock.patch('saharaclient.api.clusters.ClusterManager.verification_update')
def test_verification_skipped(self, mock_verification, mock_get_status):
self.base_scenario._init_clients()
self.assertIsNone(self.base_scenario.check_verification('id_cluster'))
| apache-2.0 | 3,546,009,782,637,582,300 | 47.545052 | 79 | 0.54094 | false |
fusionbox/django-extensions | django_extensions/management/commands/runserver_plus.py | 2 | 6895 | from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import setup_logger, RedirectHandler
from optparse import make_option
import os
import sys
import time
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError:
USE_STATICFILES = False
import logging
logger = logging.getLogger(__name__)
def null_technical_500_response(request, exc_type, exc_value, tb):
raise exc_type, exc_value, tb
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
make_option('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.'),
make_option('--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
make_option('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed"),
)
if USE_STATICFILES:
option_list += (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
setup_logger(logger, self.stderr, filename=options.get('output_file', None)) # , fmt="[%(name)s] %(message)s")
logredirect = RedirectHandler(__name__)
# Redirect werkzeug log items
werklogger = logging.getLogger('werkzeug')
werklogger.setLevel(logging.INFO)
werklogger.addHandler(logredirect)
werklogger.propagate = False
if options.get("print_sql", False):
from django.db.backends import util
try:
import sqlparse
except ImportError:
sqlparse = None # noqa
class PrintQueryWrapper(util.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
execution_time = time.time() - starttime
therest = ' -- [Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias)
if sqlparse:
logger.info(sqlparse.format(raw_sql, reindent=True) + therest)
else:
logger.info(raw_sql + therest)
util.CursorDebugWrapper = PrintQueryWrapper
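            # After this monkeypatch every query is logged roughly as
            # (illustrative output; the SQL is reformatted over multiple
            # lines when sqlparse is importable):
            #   SELECT "auth_user"."id" FROM "auth_user"
            #    -- [Execution time: 0.000412s] [Database: default]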
try:
from django.core.servers.basehttp import AdminMediaHandler
USE_ADMINMEDIAHANDLER = True
except ImportError:
USE_ADMINMEDIAHANDLER = False
try:
from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
except ImportError:
from django.core.handlers.wsgi import WSGIHandler # noqa
try:
from werkzeug import run_simple, DebuggedApplication
except ImportError:
raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
# usurp django's handler
from django.views import debug
debug.technical_500_response = null_technical_500_response
if args:
raise CommandError('Usage is runserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
threaded = options.get('threaded', False)
use_reloader = options.get('use_reloader', True)
open_browser = options.get('open_browser', False)
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
def inner_run():
print "Validating models..."
self.validate(display_num_errors=True)
print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
print "Development server is running at http://%s:%s/" % (addr, port)
print "Using the Werkzeug debugger (http://werkzeug.pocoo.org/)"
print "Quit the server with %s." % quit_command
path = options.get('admin_media_path', '')
if not path:
admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
if os.path.isdir(admin_media_path):
path = admin_media_path
else:
path = os.path.join(django.__path__[0], 'contrib/admin/media')
handler = WSGIHandler()
if USE_ADMINMEDIAHANDLER:
handler = AdminMediaHandler(handler, path)
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
if open_browser:
import webbrowser
url = "http://%s:%s/" % (addr, port)
webbrowser.open(url)
run_simple(addr, int(port), DebuggedApplication(handler, True),
use_reloader=use_reloader, use_debugger=True, threaded=threaded)
inner_run()
| mit | 8,625,338,265,703,211,000 | 43.772727 | 161 | 0.589848 | false |
MITHyperloopTeam/software_core | software/UI/braking_control.py | 1 | 2694 | #!/usr/bin/env python
#signif reference to http://pastebin.com/k87sfiEf
import sys
import math
import signal
import time
import os
import math, random
import numpy as np
from numpy import linalg
from PIL import Image
#interface stuff
from PyQt4 import QtCore, QtGui, QtOpenGL
import pyqtgraph as pg
#comms stuff
import lcm
from mithl import vectorXf_t
from mithl import trigger_t
from mithl import velocity_t
from mithl import auto_braking_t
from lcm_utils import *
#read yaml config information
import yaml
class BrakingWidget(QtGui.QWidget):
    ''' Braking control widget. Presents a slider for braking aggression and
    publishes the corresponding AUTO_BRAKING command over LCM.'''
def __init__(self, config, lc=None, parent=None, name=None):
super(BrakingWidget, self).__init__(parent)
self.lc = lc
if name:
self.setObjectName(name)
self.startTime = time.time()
brakingLayout = QtGui.QVBoxLayout()
self.brakingSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.brakingSlider.setFocusPolicy(QtCore.Qt.StrongFocus)
self.brakingSlider.setTickPosition(QtGui.QSlider.TicksBothSides)
self.brakingSlider.setTickInterval(1)
self.brakingSlider.setSingleStep(1)
self.brakingSlider.setRange(1,25)
self.brakingSlider.setValue(17)
self.brakingSlider.valueChanged.connect(self.handle_braking_slider)
self.brakingLabel = QtGui.QLabel("Braking")
self.brakingLabel.setAlignment(QtCore.Qt.AlignCenter)
brakingLayout.addWidget(self.brakingLabel)
brakingLayout.addWidget(self.brakingSlider)
self.setLayout(brakingLayout)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(33)
self.handle_braking_slider()
def update(self):
pass
def handle_braking_slider(self): #higher braking slope means more braking aggression
auto_braking_msg = auto_braking_t()
auto_braking_msg.slope = -self.brakingSlider.value()/100.0
auto_braking_msg.desiredDistanceToEnd = 50.0
auto_braking_msg.kP = 100.0
print "Requested braking slope: "+str(auto_braking_msg.slope)
self.lc.publish("AUTO_BRAKING", auto_braking_msg.encode())
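        # Mapping implied by the slider range above: value 1 -> slope -0.01
        # (gentlest), the default 17 -> -0.17, and 25 -> -0.25 (most
        # aggressive braking).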
if __name__ == '__main__':
# hook up interrupt signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
with open('../config/simConfig.yaml', 'r') as f:
config = yaml.load(f)
lc = create_lcm()
app = QtGui.QApplication(sys.argv)
window = BrakingWidget(config, lc=lc)
window.show()
start_lcm(lc)
sys.exit(app.exec_())
| lgpl-3.0 | -6,407,577,211,359,666,000 | 27.0625 | 88 | 0.673719 | false |
mikhaelharswanto/ryu | ryu/tests/unit/packet/test_ipv6.py | 4 | 25463 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import inspect
import struct
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.lib import addrconv
from ryu.lib import ip
from ryu.lib.packet import ipv6
LOG = logging.getLogger(__name__)
class Test_ipv6(unittest.TestCase):
def setUp(self):
self.version = 6
self.traffic_class = 0
self.flow_label = 0
self.payload_length = 817
self.nxt = 6
self.hop_limit = 128
self.src = '2002:4637:d5d3::4637:d5d3'
self.dst = '2001:4860:0:2001::68'
self.ext_hdrs = []
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.v_tc_flow = (
self.version << 28 | self.traffic_class << 20 |
self.flow_label << 12)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
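        # Header word layout per RFC 2460: version (4 bits) | traffic class
        # (8 bits) | flow label (20 bits); with zero tc/flow here the packed
        # word is simply 6 << 28 = 0x60000000.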
def setUp_with_hop_opts(self):
self.opt1_type = 5
self.opt1_len = 2
self.opt1_data = '\x00\x00'
self.opt2_type = 1
self.opt2_len = 0
self.opt2_data = None
self.options = [
ipv6.option(self.opt1_type, self.opt1_len, self.opt1_data),
ipv6.option(self.opt2_type, self.opt2_len, self.opt2_data),
]
self.hop_opts_nxt = 6
self.hop_opts_size = 0
self.hop_opts = ipv6.hop_opts(
self.hop_opts_nxt, self.hop_opts_size, self.options)
self.ext_hdrs = [self.hop_opts]
self.payload_length += len(self.hop_opts)
self.nxt = ipv6.hop_opts.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.hop_opts.serialize()
def setUp_with_dst_opts(self):
self.opt1_type = 5
self.opt1_len = 2
self.opt1_data = '\x00\x00'
self.opt2_type = 1
self.opt2_len = 0
self.opt2_data = None
self.options = [
ipv6.option(self.opt1_type, self.opt1_len, self.opt1_data),
ipv6.option(self.opt2_type, self.opt2_len, self.opt2_data),
]
self.dst_opts_nxt = 6
self.dst_opts_size = 0
self.dst_opts = ipv6.dst_opts(
self.dst_opts_nxt, self.dst_opts_size, self.options)
self.ext_hdrs = [self.dst_opts]
self.payload_length += len(self.dst_opts)
self.nxt = ipv6.dst_opts.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.dst_opts.serialize()
def setUp_with_fragment(self):
self.fragment_nxt = 6
self.fragment_offset = 50
self.fragment_more = 1
self.fragment_id = 123
self.fragment = ipv6.fragment(
self.fragment_nxt, self.fragment_offset, self.fragment_more,
self.fragment_id)
self.ext_hdrs = [self.fragment]
self.payload_length += len(self.fragment)
self.nxt = ipv6.fragment.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.fragment.serialize()
def setUp_with_auth(self):
self.auth_nxt = 6
self.auth_size = 4
self.auth_spi = 256
self.auth_seq = 1
self.auth_data = '\xa0\xe7\xf8\xab\xf9\x69\x1a\x8b\xf3\x9f\x7c\xae'
self.auth = ipv6.auth(
self.auth_nxt, self.auth_size, self.auth_spi, self.auth_seq,
self.auth_data)
self.ext_hdrs = [self.auth]
self.payload_length += len(self.auth)
self.nxt = ipv6.auth.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.auth.serialize()
def setUp_with_multi_headers(self):
self.opt1_type = 5
self.opt1_len = 2
self.opt1_data = '\x00\x00'
self.opt2_type = 1
self.opt2_len = 0
self.opt2_data = None
self.options = [
ipv6.option(self.opt1_type, self.opt1_len, self.opt1_data),
ipv6.option(self.opt2_type, self.opt2_len, self.opt2_data),
]
self.hop_opts_nxt = ipv6.auth.TYPE
self.hop_opts_size = 0
self.hop_opts = ipv6.hop_opts(
self.hop_opts_nxt, self.hop_opts_size, self.options)
self.auth_nxt = 6
self.auth_size = 4
self.auth_spi = 256
self.auth_seq = 1
self.auth_data = '\xa0\xe7\xf8\xab\xf9\x69\x1a\x8b\xf3\x9f\x7c\xae'
self.auth = ipv6.auth(
self.auth_nxt, self.auth_size, self.auth_spi, self.auth_seq,
self.auth_data)
self.ext_hdrs = [self.hop_opts, self.auth]
self.payload_length += len(self.hop_opts) + len(self.auth)
self.nxt = ipv6.hop_opts.TYPE
self.ip = ipv6.ipv6(
self.version, self.traffic_class, self.flow_label,
self.payload_length, self.nxt, self.hop_limit, self.src,
self.dst, self.ext_hdrs)
self.buf = struct.pack(
ipv6.ipv6._PACK_STR, self.v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
self.buf += self.hop_opts.serialize()
self.buf += self.auth.serialize()
def tearDown(self):
pass
def test_init(self):
eq_(self.version, self.ip.version)
eq_(self.traffic_class, self.ip.traffic_class)
eq_(self.flow_label, self.ip.flow_label)
eq_(self.payload_length, self.ip.payload_length)
eq_(self.nxt, self.ip.nxt)
eq_(self.hop_limit, self.ip.hop_limit)
eq_(self.src, self.ip.src)
eq_(self.dst, self.ip.dst)
eq_(str(self.ext_hdrs), str(self.ip.ext_hdrs))
def test_init_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_init()
def test_init_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_init()
def test_init_with_fragment(self):
self.setUp_with_fragment()
self.test_init()
def test_init_with_auth(self):
self.setUp_with_auth()
self.test_init()
def test_init_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_init()
def test_parser(self):
_res = self.ip.parser(str(self.buf))
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.version, res.version)
eq_(self.traffic_class, res.traffic_class)
eq_(self.flow_label, res.flow_label)
eq_(self.payload_length, res.payload_length)
eq_(self.nxt, res.nxt)
eq_(self.hop_limit, res.hop_limit)
eq_(self.src, res.src)
eq_(self.dst, res.dst)
eq_(str(self.ext_hdrs), str(res.ext_hdrs))
def test_parser_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_parser()
def test_parser_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_parser()
def test_parser_with_fragment(self):
self.setUp_with_fragment()
self.test_parser()
def test_parser_with_auth(self):
self.setUp_with_auth()
self.test_parser()
def test_parser_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
res = struct.unpack_from(ipv6.ipv6._PACK_STR, str(buf))
eq_(self.v_tc_flow, res[0])
eq_(self.payload_length, res[1])
eq_(self.nxt, res[2])
eq_(self.hop_limit, res[3])
eq_(self.src, addrconv.ipv6.bin_to_text(res[4]))
eq_(self.dst, addrconv.ipv6.bin_to_text(res[5]))
def test_serialize_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
hop_opts = ipv6.hop_opts.parser(str(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.hop_opts), repr(hop_opts))
def test_serialize_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
dst_opts = ipv6.dst_opts.parser(str(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.dst_opts), repr(dst_opts))
def test_serialize_with_fragment(self):
self.setUp_with_fragment()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
fragment = ipv6.fragment.parser(str(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.fragment), repr(fragment))
def test_serialize_with_auth(self):
self.setUp_with_auth()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
auth = ipv6.auth.parser(str(buf[ipv6.ipv6._MIN_LEN:]))
eq_(repr(self.auth), repr(auth))
def test_serialize_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ip.serialize(data, prev)
offset = ipv6.ipv6._MIN_LEN
hop_opts = ipv6.hop_opts.parser(str(buf[offset:]))
offset += len(hop_opts)
auth = ipv6.auth.parser(str(buf[offset:]))
eq_(repr(self.hop_opts), repr(hop_opts))
eq_(repr(self.auth), repr(auth))
def test_to_string(self):
ipv6_values = {'version': self.version,
'traffic_class': self.traffic_class,
'flow_label': self.flow_label,
'payload_length': self.payload_length,
'nxt': self.nxt,
'hop_limit': self.hop_limit,
'src': repr(self.src),
'dst': repr(self.dst),
'ext_hdrs': self.ext_hdrs}
_ipv6_str = ','.join(['%s=%s' % (k, ipv6_values[k])
for k, v in inspect.getmembers(self.ip)
if k in ipv6_values])
ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
eq_(str(self.ip), ipv6_str)
eq_(repr(self.ip), ipv6_str)
def test_to_string_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_to_string()
def test_to_string_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_to_string()
def test_to_string_with_fragment(self):
self.setUp_with_fragment()
self.test_to_string()
def test_to_string_with_auth(self):
self.setUp_with_auth()
self.test_to_string()
def test_to_string_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_to_string()
def test_len(self):
eq_(len(self.ip), 40)
def test_len_with_hop_opts(self):
self.setUp_with_hop_opts()
eq_(len(self.ip), 40 + len(self.hop_opts))
def test_len_with_dst_opts(self):
self.setUp_with_dst_opts()
eq_(len(self.ip), 40 + len(self.dst_opts))
def test_len_with_fragment(self):
self.setUp_with_fragment()
eq_(len(self.ip), 40 + len(self.fragment))
def test_len_with_auth(self):
self.setUp_with_auth()
eq_(len(self.ip), 40 + len(self.auth))
def test_len_with_multi_headers(self):
self.setUp_with_multi_headers()
eq_(len(self.ip), 40 + len(self.hop_opts) + len(self.auth))
def test_default_args(self):
ip = ipv6.ipv6()
buf = ip.serialize(bytearray(), None)
res = struct.unpack(ipv6.ipv6._PACK_STR, str(buf))
eq_(res[0], 6 << 28)
eq_(res[1], 0)
eq_(res[2], 6)
eq_(res[3], 255)
eq_(res[4], addrconv.ipv6.text_to_bin('::'))
eq_(res[5], addrconv.ipv6.text_to_bin('::'))
# with extension header
ip = ipv6.ipv6(
nxt=0, ext_hdrs=[
ipv6.hop_opts(58, 0, [
ipv6.option(5, 2, '\x00\x00'),
ipv6.option(1, 0, None)])])
buf = ip.serialize(bytearray(), None)
res = struct.unpack(ipv6.ipv6._PACK_STR + '8s', str(buf))
eq_(res[0], 6 << 28)
eq_(res[1], 8)
eq_(res[2], 0)
eq_(res[3], 255)
eq_(res[4], addrconv.ipv6.text_to_bin('::'))
eq_(res[5], addrconv.ipv6.text_to_bin('::'))
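        # Expected Hop-by-Hop header bytes: nxt=58 (0x3a), hdr-ext-len=0,
        # option(5, 2, '\x00\x00') -> 05 02 00 00, then PadN
        # option(1, 0, None) -> 01 00.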
eq_(res[6], '\x3a\x00\x05\x02\x00\x00\x01\x00')
def test_json(self):
jsondict = self.ip.to_jsondict()
ip = ipv6.ipv6.from_jsondict(jsondict['ipv6'])
eq_(str(self.ip), str(ip))
def test_json_with_hop_opts(self):
self.setUp_with_hop_opts()
self.test_json()
def test_json_with_dst_opts(self):
self.setUp_with_dst_opts()
self.test_json()
def test_json_with_fragment(self):
self.setUp_with_fragment()
self.test_json()
def test_json_with_auth(self):
self.setUp_with_auth()
self.test_json()
def test_json_with_multi_headers(self):
self.setUp_with_multi_headers()
self.test_json()
class Test_hop_opts(unittest.TestCase):
def setUp(self):
self.nxt = 0
self.size = 8
self.data = [
ipv6.option(5, 2, '\x00\x00'),
ipv6.option(1, 0, None),
ipv6.option(0xc2, 4, '\x00\x01\x00\x00'),
ipv6.option(1, 0, None),
]
self.hop = ipv6.hop_opts(self.nxt, self.size, self.data)
self.form = '!BB'
self.buf = struct.pack(self.form, self.nxt, self.size) \
+ self.data[0].serialize() \
+ self.data[1].serialize() \
+ self.data[2].serialize() \
+ self.data[3].serialize()
def tearDown(self):
pass
def test_init(self):
eq_(self.nxt, self.hop.nxt)
eq_(self.size, self.hop.size)
eq_(self.data, self.hop.data)
@raises(Exception)
def test_invalid_size(self):
ipv6.hop_opts(self.nxt, 1, self.data)
def test_parser(self):
_res = ipv6.hop_opts.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.nxt, res.nxt)
eq_(self.size, res.size)
eq_(str(self.data), str(res.data))
def test_serialize(self):
buf = self.hop.serialize()
res = struct.unpack_from(self.form, str(buf))
eq_(self.nxt, res[0])
eq_(self.size, res[1])
offset = struct.calcsize(self.form)
opt1 = ipv6.option.parser(str(buf[offset:]))
offset += len(opt1)
opt2 = ipv6.option.parser(str(buf[offset:]))
offset += len(opt2)
opt3 = ipv6.option.parser(str(buf[offset:]))
offset += len(opt3)
opt4 = ipv6.option.parser(str(buf[offset:]))
eq_(5, opt1.type_)
eq_(2, opt1.len_)
eq_('\x00\x00', opt1.data)
eq_(1, opt2.type_)
eq_(0, opt2.len_)
eq_(None, opt2.data)
eq_(0xc2, opt3.type_)
eq_(4, opt3.len_)
eq_('\x00\x01\x00\x00', opt3.data)
eq_(1, opt4.type_)
eq_(0, opt4.len_)
eq_(None, opt4.data)
def test_len(self):
eq_(16, len(self.hop))
def test_default_args(self):
hdr = ipv6.hop_opts()
buf = hdr.serialize()
res = struct.unpack('!BB', str(buf[:2]))
eq_(res[0], 6)
eq_(res[1], 0)
opt = ipv6.option(type_=1, len_=4, data='\x00\x00\x00\x00')
eq_(str(buf[2:]), opt.serialize())
class Test_dst_opts(unittest.TestCase):
def setUp(self):
self.nxt = 60
self.size = 8
self.data = [
ipv6.option(5, 2, '\x00\x00'),
ipv6.option(1, 0, None),
ipv6.option(0xc2, 4, '\x00\x01\x00\x00'),
ipv6.option(1, 0, None),
]
self.dst = ipv6.dst_opts(self.nxt, self.size, self.data)
self.form = '!BB'
self.buf = struct.pack(self.form, self.nxt, self.size) \
+ self.data[0].serialize() \
+ self.data[1].serialize() \
+ self.data[2].serialize() \
+ self.data[3].serialize()
def tearDown(self):
pass
def test_init(self):
eq_(self.nxt, self.dst.nxt)
eq_(self.size, self.dst.size)
eq_(self.data, self.dst.data)
@raises(Exception)
def test_invalid_size(self):
ipv6.dst_opts(self.nxt, 1, self.data)
def test_parser(self):
_res = ipv6.dst_opts.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.nxt, res.nxt)
eq_(self.size, res.size)
eq_(str(self.data), str(res.data))
def test_serialize(self):
buf = self.dst.serialize()
res = struct.unpack_from(self.form, str(buf))
eq_(self.nxt, res[0])
eq_(self.size, res[1])
offset = struct.calcsize(self.form)
opt1 = ipv6.option.parser(str(buf[offset:]))
offset += len(opt1)
opt2 = ipv6.option.parser(str(buf[offset:]))
offset += len(opt2)
opt3 = ipv6.option.parser(str(buf[offset:]))
offset += len(opt3)
opt4 = ipv6.option.parser(str(buf[offset:]))
eq_(5, opt1.type_)
eq_(2, opt1.len_)
eq_('\x00\x00', opt1.data)
eq_(1, opt2.type_)
eq_(0, opt2.len_)
eq_(None, opt2.data)
eq_(0xc2, opt3.type_)
eq_(4, opt3.len_)
eq_('\x00\x01\x00\x00', opt3.data)
eq_(1, opt4.type_)
eq_(0, opt4.len_)
eq_(None, opt4.data)
def test_len(self):
eq_(16, len(self.dst))
def test_default_args(self):
hdr = ipv6.dst_opts()
buf = hdr.serialize()
res = struct.unpack('!BB', str(buf[:2]))
eq_(res[0], 6)
eq_(res[1], 0)
opt = ipv6.option(type_=1, len_=4, data='\x00\x00\x00\x00')
eq_(str(buf[2:]), opt.serialize())
class Test_option(unittest.TestCase):
def setUp(self):
self.type_ = 5
self.data = '\x00\x00'
self.len_ = len(self.data)
self.opt = ipv6.option(self.type_, self.len_, self.data)
self.form = '!BB%ds' % self.len_
self.buf = struct.pack(self.form, self.type_, self.len_, self.data)
def tearDown(self):
pass
def test_init(self):
eq_(self.type_, self.opt.type_)
eq_(self.len_, self.opt.len_)
eq_(self.data, self.opt.data)
def test_parser(self):
_res = ipv6.option.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.type_, res.type_)
eq_(self.len_, res.len_)
eq_(self.data, res.data)
def test_serialize(self):
buf = self.opt.serialize()
res = struct.unpack_from(self.form, buf)
eq_(self.type_, res[0])
eq_(self.len_, res[1])
eq_(self.data, res[2])
def test_len(self):
eq_(len(self.opt), 2 + self.len_)
class Test_option_pad1(Test_option):
def setUp(self):
self.type_ = 0
self.len_ = -1
self.data = None
self.opt = ipv6.option(self.type_, self.len_, self.data)
self.form = '!B'
self.buf = struct.pack(self.form, self.type_)
def test_serialize(self):
buf = self.opt.serialize()
res = struct.unpack_from(self.form, buf)
eq_(self.type_, res[0])
def test_default_args(self):
opt = ipv6.option()
buf = opt.serialize()
res = struct.unpack('!B', buf)
eq_(res[0], 0)
class Test_option_padN(Test_option):
def setUp(self):
self.type_ = 1
self.len_ = 0
self.data = None
self.opt = ipv6.option(self.type_, self.len_, self.data)
self.form = '!BB'
self.buf = struct.pack(self.form, self.type_, self.len_)
def test_serialize(self):
buf = self.opt.serialize()
res = struct.unpack_from(self.form, buf)
eq_(self.type_, res[0])
eq_(self.len_, res[1])
class Test_fragment(unittest.TestCase):
def setUp(self):
self.nxt = 44
self.offset = 50
self.more = 1
self.id_ = 123
self.fragment = ipv6.fragment(
self.nxt, self.offset, self.more, self.id_)
self.off_m = (self.offset << 3 | self.more)
self.form = '!BxHI'
self.buf = struct.pack(self.form, self.nxt, self.off_m, self.id_)
def test_init(self):
eq_(self.nxt, self.fragment.nxt)
eq_(self.offset, self.fragment.offset)
eq_(self.more, self.fragment.more)
eq_(self.id_, self.fragment.id_)
def test_parser(self):
_res = ipv6.fragment.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.nxt, res.nxt)
eq_(self.offset, res.offset)
eq_(self.more, res.more)
eq_(self.id_, res.id_)
def test_serialize(self):
buf = self.fragment.serialize()
res = struct.unpack_from(self.form, str(buf))
eq_(self.nxt, res[0])
eq_(self.off_m, res[1])
eq_(self.id_, res[2])
def test_len(self):
eq_(8, len(self.fragment))
def test_default_args(self):
hdr = ipv6.fragment()
buf = hdr.serialize()
res = struct.unpack_from(ipv6.fragment._PACK_STR, buf)
eq_(res[0], 6)
eq_(res[1], 0)
eq_(res[2], 0)
class Test_auth(unittest.TestCase):
def setUp(self):
self.nxt = 0
self.size = 4
self.spi = 256
self.seq = 1
self.data = '\x21\xd3\xa9\x5c\x5f\xfd\x4d\x18\x46\x22\xb9\xf8'
self.auth = ipv6.auth(
self.nxt, self.size, self.spi, self.seq, self.data)
self.form = '!BB2xII12s'
self.buf = struct.pack(self.form, self.nxt, self.size, self.spi,
self.seq, self.data)
def test_init(self):
eq_(self.nxt, self.auth.nxt)
eq_(self.size, self.auth.size)
eq_(self.spi, self.auth.spi)
eq_(self.seq, self.auth.seq)
eq_(self.data, self.auth.data)
def test_parser(self):
_res = ipv6.auth.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.nxt, res.nxt)
eq_(self.size, res.size)
eq_(self.spi, res.spi)
eq_(self.seq, res.seq)
eq_(self.data, res.data)
def test_serialize(self):
buf = self.auth.serialize()
res = struct.unpack_from(self.form, str(buf))
eq_(self.nxt, res[0])
eq_(self.size, res[1])
eq_(self.spi, res[2])
eq_(self.seq, res[3])
eq_(self.data, res[4])
def test_len(self):
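        # RFC 4302 stores the AH length in 4-octet units minus 2, so
        # size=4 serializes to (4 + 2) * 4 == (4 - 1) * 8 == 24 bytes.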
eq_((4 - 1) * 8, len(self.auth))
def test_default_args(self):
hdr = ipv6.auth()
buf = hdr.serialize()
LOG.info(repr(buf))
res = struct.unpack_from(ipv6.auth._PACK_STR, str(buf))
LOG.info(res)
eq_(res[0], 6)
eq_(res[1], 3)
eq_(res[2], 0)
eq_(res[3], 0)
eq_(buf[ipv6.auth._MIN_LEN:], '\x00\x00\x00\x00')
| apache-2.0 | 5,943,678,540,030,711,000 | 30.749377 | 75 | 0.547972 | false |
anryko/ansible | lib/ansible/modules/utilities/logic/import_role.py | 17 | 2834 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'
}
DOCUMENTATION = r'''
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
- Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(include_role) instead.
version_added: '2.4'
options:
name:
description:
- The name of the role to be executed.
type: str
required: true
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
type: str
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
type: str
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
type: str
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: yes
handlers_from:
description:
- File to load from a role's C(handlers/) directory.
type: str
default: main
version_added: '2.8'
notes:
- Handlers are made available to the whole play.
- Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
M(import_role) task.
- Unlike M(include_role) variable exposure is not configurable, and will always be exposed.
seealso:
- module: import_playbook
- module: import_tasks
- module: include_role
- module: include_tasks
- ref: playbooks_reuse_includes
description: More information related to including and importing playbooks, roles and tasks.
'''
EXAMPLES = r'''
- hosts: all
tasks:
- import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply condition to each task in role
import_role:
name: myrole
when: not idontwanttorun
'''
RETURN = r'''
# This module does not return anything except tasks to execute.
'''
| gpl-3.0 | -3,672,238,618,073,113,000 | 27.918367 | 118 | 0.679605 | false |
marcellodesales/svnedge-console | ext/linux/pkg-toolkit/pkg/vendor-packages/pkg/client/imageplan.py | 4 | 40807 | #!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
import os
import errno
import traceback
import itertools
import pkg.actions
import pkg.client.actuator as actuator
import pkg.client.api_errors as api_errors
import pkg.client.imagestate as imagestate
import pkg.client.pkgplan as pkgplan
import pkg.client.indexer as indexer
import pkg.fmri as fmri
import pkg.search_errors as se
import pkg.variant as variant
from pkg.client.filter import compile_filter
from pkg.misc import msg
UNEVALUATED = 0 # nothing done yet
EVALUATED_PKGS = 1 # established fmri changes
EVALUATED_OK = 2 # ready to execute
PREEXECUTED_OK = 3 # finished w/ preexecute
PREEXECUTED_ERROR = 4 # whoops
EXECUTED_OK = 5 # finished execution
EXECUTED_ERROR = 6 # failed
class ImagePlan(object):
"""An image plan takes a list of requested packages, an Image (and its
policy restrictions), and returns the set of package operations needed
to transform the Image to the list of requested packages.
Use of an ImagePlan involves the identification of the Image, the
Catalogs (implicitly), and a set of complete or partial package FMRIs.
The Image's policy, which is derived from its type and configuration
will cause the formulation of the plan or an exception state.
XXX In the current formulation, an ImagePlan can handle [null ->
PkgFmri] and [PkgFmri@Version1 -> PkgFmri@Version2], for a set of
PkgFmri objects. With a correct Action object definition, deletion
should be able to be represented as [PkgFmri@V1 -> null].
XXX Should we allow downgrades? There's an "arrow of time" associated
with the smf(5) configuration method, so it's better to direct
manipulators to snapshot-based rollback, but if people are going to do
"pkg delete fmri; pkg install fmri@v(n - 1)", then we'd better have a
plan to identify when this operation is safe or unsafe."""
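        # Illustrative lifecycle sketch (not part of the original source;
        # the real drivers live elsewhere in the client code):
        #
        #     ip = ImagePlan(image, progtrack, check_cancel)
        #     ip.propose_fmri(pfmri)    # or propose_fmri_removal(pfmri)
        #     ip.evaluate()             # UNEVALUATED -> EVALUATED_OK
        #     ip.preexecute()           # EVALUATED_OK -> PREEXECUTED_OK
        #     ip.execute()              # PREEXECUTED_OK -> EXECUTED_OK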
def __init__(self, image, progtrack, check_cancelation,
recursive_removal=False, filters=None, variants=None,
noexecute=False):
if filters is None:
filters = []
self.image = image
self.state = UNEVALUATED
self.recursive_removal = recursive_removal
self.progtrack = progtrack
self.noexecute = noexecute
if noexecute:
self.__intent = imagestate.INTENT_EVALUATE
else:
self.__intent = imagestate.INTENT_PROCESS
self.target_fmris = []
self.target_rem_fmris = []
self.pkg_plans = []
self.new_users = set()
self.rem_users = set()
self.new_groups = set()
self.rem_groups = set()
                self.target_install_count = 0
self.target_update_count = 0
self.__directories = None
self.__cached_actions = {}
ifilters = [
"%s = %s" % (k, v)
for k, v in image.cfg_cache.filters.iteritems()
]
self.filters = [ compile_filter(f) for f in filters + ifilters ]
self.old_excludes = image.list_excludes()
self.new_excludes = image.list_excludes(variants)
self.check_cancelation = check_cancelation
self.actuators = None
self.update_index = True
self.preexecuted_indexing_error = None
def __str__(self):
if self.state == UNEVALUATED:
s = "UNEVALUATED:\n"
for t in self.target_fmris:
s = s + "+%s\n" % t
for t in self.target_rem_fmris:
s = s + "-%s\n" % t
return s
s = "Package changes:\n"
for pp in self.pkg_plans:
s = s + "%s\n" % pp
s = s + "Actuators:\n%s" % self.actuators
s = s + "Variants: %s -> %s\n" % (self.old_excludes, self.new_excludes)
return s
def get_plan(self, full=True):
if full:
return str(self)
output = ""
for pp in self.pkg_plans:
output += "%s -> %s\n" % (pp.origin_fmri,
pp.destination_fmri)
return output
def display(self):
for pp in self.pkg_plans:
msg("%s -> %s" % (pp.origin_fmri, pp.destination_fmri))
msg("Actuators:\n%s" % self.actuators)
def is_proposed_fmri(self, pfmri):
for pf in self.target_fmris:
if pfmri.is_same_pkg(pf):
return not pfmri.is_successor(pf)
return False
def is_proposed_rem_fmri(self, pfmri):
for pf in self.target_rem_fmris:
if pfmri.is_same_pkg(pf):
return True
return False
def propose_fmri(self, pfmri):
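                """Propose installation of pfmri (or a constrained
                successor), merging it into the target fmri list unless an
                equal or newer version is already installed or proposed."""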
# is a version of fmri.stem in the inventory?
if self.image.has_version_installed(pfmri) and \
self.old_excludes == self.new_excludes:
return
# is there a freeze or incorporation statement?
# do any of them eliminate this fmri version?
# discard
#
# update so that we meet any optional dependencies
#
pfmri = self.image.constraints.apply_constraints_to_fmri(pfmri)
self.image.fmri_set_default_publisher(pfmri)
# Add fmri to target list only if it (or a successor) isn't
# there already.
for i, p in enumerate(self.target_fmris):
if pfmri.is_successor(p):
self.target_fmris[i] = pfmri
break
if p.is_successor(pfmri):
break
else:
self.target_fmris.append(pfmri)
return
def get_proposed_version(self, pfmri):
""" Return version of fmri already proposed, or None
if not proposed yet."""
for p in self.target_fmris:
if pfmri.get_name() == p.get_name():
return p
else:
return None
def older_version_proposed(self, pfmri):
# returns true if older version of this pfmri has been proposed
# already
for p in self.target_fmris:
if pfmri.is_successor(p):
return True
return False
# XXX Need to make sure that the same package isn't being added and
# removed in the same imageplan.
def propose_fmri_removal(self, pfmri):
if not self.image.has_version_installed(pfmri):
return
for i, p in enumerate(self.target_rem_fmris):
if pfmri.is_successor(p):
self.target_rem_fmris[i] = pfmri
break
else:
self.target_rem_fmris.append(pfmri)
def gen_new_installed_pkgs(self):
""" generates all the fmris in the new set of installed pkgs"""
assert self.state >= EVALUATED_PKGS
fmri_set = set(self.image.gen_installed_pkgs())
for p in self.pkg_plans:
p.update_pkg_set(fmri_set)
for pfmri in fmri_set:
yield pfmri
def gen_new_installed_actions(self):
"""generates actions in new installed image"""
for pfmri in self.gen_new_installed_pkgs():
m = self.image.get_manifest(pfmri)
for act in m.gen_actions(self.new_excludes):
yield act
def gen_new_installed_actions_bytype(self, atype):
"""generates actions in new installed image"""
for pfmri in self.gen_new_installed_pkgs():
m = self.image.get_manifest(pfmri)
for act in m.gen_actions_by_type(atype,
self.new_excludes):
yield act
def get_directories(self):
""" return set of all directories in target image """
# always consider var and var/pkg fixed in image....
# XXX should be fixed for user images
                if self.__directories is None:
dirs = set(["var",
"var/pkg",
"var/sadm",
"var/sadm/install"])
for fmri in self.gen_new_installed_pkgs():
m = self.image.get_manifest(fmri)
for d in m.get_directories(self.new_excludes):
dirs.add(os.path.normpath(d))
self.__directories = dirs
return self.__directories
def get_actions(self, name, key=None):
"""Return a dictionary of actions of the type given by 'name'
describing the target image. If 'key' is given and not None,
the dictionary's key will be the name of the action type's key
attribute. Otherwise, it's a callable taking an action as an
argument which returns the key. This dictionary is cached for
quick future lookups."""
if key is None:
attr_name = pkg.actions.types[name].key_attr
key = lambda act: act.attrs[attr_name]
if (name, key) in self.__cached_actions:
return self.__cached_actions[(name, key)]
d = {}
for act in self.gen_new_installed_actions_bytype(name):
t = key(act)
d.setdefault(t, []).append(act)
self.__cached_actions[(name, key)] = d
return self.__cached_actions[(name, key)]
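        # Example use (mirrors the hardlink pass in evaluate() below):
        #     l_actions = self.get_actions("hardlink",
        #         lambda a: a.get_target_path())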
def evaluate_fmri(self, pfmri):
self.progtrack.evaluate_progress(pfmri)
self.image.state.set_target(pfmri, self.__intent)
if self.check_cancelation():
raise api_errors.CanceledException()
self.image.fmri_set_default_publisher(pfmri)
m = self.image.get_manifest(pfmri)
# check to make sure package is not tagged as being only
# for other architecture(s)
supported = m.get_variants("variant.arch")
if supported and self.image.get_arch() not in supported:
raise api_errors.PlanCreationException(badarch=(pfmri,
supported, self.image.get_arch()))
# build list of (action, fmri, constraint) of dependencies
a_list = [
(a,) + a.parse(self.image, pfmri.get_name())
for a in m.gen_actions_by_type("depend", self.new_excludes)
]
# Update constraints first to avoid problems w/ depth first
# traversal of dependencies; we may violate an existing
# constraint here.
if self.image.constraints.start_loading(pfmri):
for a, f, constraint in a_list:
self.image.constraints.update_constraints(
constraint)
self.image.constraints.finish_loading(pfmri)
# now check what work is required
for a, f, constraint in a_list:
# discover if we have an installed or proposed
# version of this pkg already; proposed fmris
# will always be newer
ref_fmri = self.get_proposed_version(f)
if not ref_fmri:
ref_fmri = self.image.get_version_installed(f)
# check if new constraint requires us to make any
# changes to already proposed pkgs or existing ones.
if not constraint.check_for_work(ref_fmri):
continue
# Apply any active optional/incorporation constraints
# from other packages
cf = self.image.constraints.apply_constraints_to_fmri(f)
# This will be the newest version of the specified
# dependency package. Package names specified in
# dependencies are treated as exact. Matches from the
# preferred publisher are used first, then matches from
# the same publisher as the evaluated fmri, and then
# first available. Callers can override this behavior
# by specifying the publisher prefix as part of the FMRI.
matches = list(self.image.inventory([ cf ], all_known=True,
matcher=fmri.exact_name_match, preferred=True,
ordered=False))
cf = matches[0][0]
evalpub = pfmri.get_publisher()
if len(matches) > 1 and not cf.preferred_publisher() \
and cf.get_publisher() != evalpub:
# If more than one match was returned, and it
# wasn't for the preferred publisher or for the
# same publisher as the fmri being evaluated,
# then try to find a match that has the same
# publisher as the fmri being evaluated.
for f, st in matches[1:]:
if f.get_publisher() == evalpub:
cf = f
break
self.propose_fmri(cf)
self.evaluate_fmri(cf)
self.image.state.set_target()
def add_pkg_plan(self, pfmri):
"""add a pkg plan to imageplan for fully evaluated frmi"""
m = self.image.get_manifest(pfmri)
pp = pkgplan.PkgPlan(self.image, self.progtrack, \
self.check_cancelation)
if self.old_excludes != self.new_excludes:
if self.image.install_file_present(pfmri):
pp.propose_reinstall(pfmri, m)
else:
pp.propose_destination(pfmri, m)
else:
try:
pp.propose_destination(pfmri, m)
except RuntimeError:
msg("pkg: %s already installed" % pfmri)
return
pp.evaluate(self.old_excludes, self.new_excludes)
if pp.origin_fmri:
self.target_update_count += 1
else:
                        self.target_install_count += 1
self.pkg_plans.append(pp)
def evaluate_fmri_removal(self, pfmri):
# prob. needs breaking up as well
assert self.image.has_manifest(pfmri)
self.progtrack.evaluate_progress(pfmri)
dependents = set(self.image.get_dependents(pfmri,
self.progtrack))
# Don't consider those dependencies already being removed in
# this imageplan transaction.
dependents = dependents.difference(self.target_rem_fmris)
if dependents and not self.recursive_removal:
raise api_errors.NonLeafPackageException(pfmri,
dependents)
pp = pkgplan.PkgPlan(self.image, self.progtrack, \
self.check_cancelation)
self.image.state.set_target(pfmri, self.__intent)
m = self.image.get_manifest(pfmri)
try:
pp.propose_removal(pfmri, m)
except RuntimeError:
self.image.state.set_target()
msg("pkg %s not installed" % pfmri)
return
pp.evaluate([], self.old_excludes)
for d in dependents:
if self.is_proposed_rem_fmri(d):
continue
if not self.image.has_version_installed(d):
continue
self.target_rem_fmris.append(d)
self.progtrack.evaluate_progress(d)
self.evaluate_fmri_removal(d)
# Post-order append will ensure topological sorting for acyclic
# dependency graphs. Cycles need to be arbitrarily broken, and
# are done so in the loop above.
self.pkg_plans.append(pp)
self.image.state.set_target()
def evaluate(self):
assert self.state == UNEVALUATED
outstring = ""
# Operate on a copy, as it will be modified in flight.
for f in self.target_fmris[:]:
self.progtrack.evaluate_progress(f)
try:
self.evaluate_fmri(f)
except KeyError, e:
outstring += "Attempting to install %s " \
"causes:\n\t%s\n" % (f.get_name(), e)
if outstring:
raise RuntimeError("No packages were installed because "
"package dependencies could not be satisfied\n" +
outstring)
for f in self.target_fmris:
self.add_pkg_plan(f)
self.progtrack.evaluate_progress(f)
for f in self.target_rem_fmris[:]:
self.evaluate_fmri_removal(f)
self.progtrack.evaluate_progress(f)
# we now have a workable set of packages to add/upgrade/remove
# now combine all actions together to create a synthetic single
# step upgrade operation, and handle editable files moving from
# package to package. See theory comment in execute, below.
self.state = EVALUATED_PKGS
self.removal_actions = [
(p, src, dest)
for p in self.pkg_plans
for src, dest in p.gen_removal_actions()
]
self.update_actions = [
(p, src, dest)
for p in self.pkg_plans
for src, dest in p.gen_update_actions()
]
self.install_actions = [
(p, src, dest)
for p in self.pkg_plans
for src, dest in p.gen_install_actions()
]
self.progtrack.evaluate_progress()
self.actuators = actuator.Actuator()
# iterate over copy of removals since we're modding list
# keep track of deletion count so later use of index works
named_removals = {}
deletions = 0
for i, a in enumerate(self.removal_actions[:]):
# remove dir removals if dir is still in final image
if a[1].name == "dir" and \
os.path.normpath(a[1].attrs["path"]) in \
self.get_directories():
del self.removal_actions[i - deletions]
deletions += 1
continue
# store names of files being removed under own name
# or original name if specified
if a[1].name == "file":
attrs = a[1].attrs
fname = attrs.get("original_name",
"%s:%s" % (a[0].origin_fmri.get_name(),
attrs["path"]))
named_removals[fname] = \
(i - deletions,
id(self.removal_actions[i-deletions][1]))
self.actuators.scan_removal(a[1].attrs)
self.progtrack.evaluate_progress()
for a in self.install_actions:
# In order to handle editable files that move their path
# or change pkgs, for all new files with original_name
# attribute, make sure file isn't being removed by
# checking removal list. If it is, tag removal to save
# file, and install to recover cached version... caching
# is needed if directories are removed or don't exist
# yet.
if (a[2].name == "file" and "original_name" in
a[2].attrs and a[2].attrs["original_name"] in
named_removals):
cache_name = a[2].attrs["original_name"]
index = named_removals[cache_name][0]
assert(id(self.removal_actions[index][1]) ==
named_removals[cache_name][1])
self.removal_actions[index][1].attrs[
"save_file"] = cache_name
a[2].attrs["save_file"] = cache_name
self.actuators.scan_install(a[2].attrs)
self.progtrack.evaluate_progress()
# Go over update actions
l_actions = self.get_actions("hardlink",
lambda a: a.get_target_path())
l_refresh = []
for a in self.update_actions:
# For any files being updated that are the target of
# _any_ hardlink actions, append the hardlink actions
# to the update list so that they are not broken.
if a[2].name == "file":
path = a[2].attrs["path"]
if path in l_actions:
l_refresh.extend([
(a[0], l, l)
for l in l_actions[path]
])
# scan both old and new actions
# repairs may result in update action w/o orig action
if a[1]:
self.actuators.scan_update(a[1].attrs)
self.actuators.scan_update(a[2].attrs)
self.update_actions.extend(l_refresh)
# sort actions to match needed processing order
self.removal_actions.sort(key = lambda obj:obj[1], reverse=True)
self.update_actions.sort(key = lambda obj:obj[2])
self.install_actions.sort(key = lambda obj:obj[2])
remove_npkgs = len(self.target_rem_fmris)
npkgs = 0
nfiles = 0
nbytes = 0
nactions = 0
for p in self.pkg_plans:
nf, nb = p.get_xferstats()
nbytes += nb
nfiles += nf
nactions += p.get_nactions()
# It's not perfectly accurate but we count a download
# even if the package will do zero data transfer. This
# makes the pkg stats consistent between download and
# install.
npkgs += 1
self.progtrack.download_set_goal(npkgs, nfiles, nbytes)
                self.progtrack.evaluate_done(self.target_install_count, \
self.target_update_count, remove_npkgs)
self.state = EVALUATED_OK
def nothingtodo(self):
""" Test whether this image plan contains any work to do """
return not self.pkg_plans
def preexecute(self):
"""Invoke the evaluated image plan
preexecute, execute and postexecute
preexecute and execute actions need to be sorted across packages
"""
assert self.state == EVALUATED_OK
if self.nothingtodo():
self.state = PREEXECUTED_OK
return
# Checks the index to make sure it exists and is
# consistent. If it's inconsistent an exception is thrown.
# If it's totally absent, it will index the existing packages
# so that the incremental update that follows at the end of
# the function will work correctly. It also repairs the index
# for this BE so the user can boot into this BE and have a
# correct index.
if self.update_index:
try:
self.image.update_index_dir()
ind = indexer.Indexer(self.image,
self.image.get_manifest,
self.image.get_manifest_path,
progtrack=self.progtrack,
excludes=self.old_excludes)
if ind.check_index_existence():
try:
ind.check_index_has_exactly_fmris(
self.image.gen_installed_pkg_names())
except se.IncorrectIndexFileHash, e:
self.preexecuted_indexing_error = \
api_errors.WrapSuccessfulIndexingException(
e,
traceback.format_exc(),
traceback.format_stack()
)
ind.rebuild_index_from_scratch(
self.image.\
gen_installed_pkgs()
)
except se.IndexingException, e:
# If there's a problem indexing, we want to
# attempt to finish the installation anyway. If
# there's a problem updating the index on the
# new image, that error needs to be
# communicated to the user.
self.preexecuted_indexing_error = \
api_errors.WrapSuccessfulIndexingException(
e, traceback.format_exc(),
traceback.format_stack())
try:
try:
for p, src, dest in itertools.chain(
self.removal_actions,
self.install_actions,
self.update_actions):
if dest:
dest.preinstall(p, src)
else:
src.preremove(p)
for p in self.pkg_plans:
p.download()
except EnvironmentError, e:
if e.errno == errno.EACCES:
raise api_errors.PermissionsException(
e.filename)
raise
self.progtrack.download_done()
except:
self.state = PREEXECUTED_ERROR
raise
self.state = PREEXECUTED_OK
def execute(self):
"""Invoke the evaluated image plan
preexecute, execute and postexecute
execute actions need to be sorted across packages
"""
assert self.state == PREEXECUTED_OK
#
# what determines execution order?
#
# The following constraints are key in understanding imageplan
# execution:
#
# 1) All non-directory actions (files, users, hardlinks,
# symbolic links, etc.) must appear in only a single installed
# package.
#
# 2) All installed packages must be consistent in their view of
# action types; if /usr/openwin is a directory in one package,
# it must be a directory in all packages, never a symbolic link;
# this includes implicitly defined directories.
#
                # A key goal in IPS is to be able to undergo an arbitrary
# transformation in package contents in a single step. Packages
# must be able to exchange files, convert directories to
# symbolic links, etc.; so long as the start and end states meet
# the above two constraints IPS must be able to transition
# between the states directly. This leads to the following:
#
# 1) All actions must be ordered across packages; packages
# cannot be updated one at a time.
#
# This is readily apparent when one considers two packages
# exchanging files in their new versions; in each case the
# package now owning the file must be installed last, but it
# is not possible for each package to be installed before the
# other. Clearly, all the removals must be done first,
# followed by the installs and updates.
#
                # 2) Installs of new actions must precede updates of existing
# ones.
#
                # In order to accommodate changes of file ownership of
                # existing files to a newly created user, it is necessary
                # for the installation of that user to precede the update of
# files to reflect their new ownership.
#
if self.nothingtodo():
self.state = EXECUTED_OK
return
# It's necessary to do this check here because the state of the
# image before the current operation is performed is desired.
empty_image = self.is_image_empty()
self.actuators.exec_prep(self.image)
self.actuators.exec_pre_actuators(self.image)
try:
try:
# execute removals
self.progtrack.actions_set_goal(
_("Removal Phase"),
len(self.removal_actions))
for p, src, dest in self.removal_actions:
p.execute_removal(src, dest)
self.progtrack.actions_add_progress()
self.progtrack.actions_done()
# execute installs
self.progtrack.actions_set_goal(
_("Install Phase"),
len(self.install_actions))
for p, src, dest in self.install_actions:
p.execute_install(src, dest)
self.progtrack.actions_add_progress()
self.progtrack.actions_done()
# execute updates
self.progtrack.actions_set_goal(
_("Update Phase"),
len(self.update_actions))
for p, src, dest in self.update_actions:
p.execute_update(src, dest)
self.progtrack.actions_add_progress()
self.progtrack.actions_done()
# handle any postexecute operations
for p in self.pkg_plans:
p.postexecute()
# write out variant changes to the image config
self.image.image_config_update()
self.image.clear_pkg_state()
except EnvironmentError, e:
if e.errno == errno.EACCES or \
e.errno == errno.EPERM:
raise api_errors.PermissionsException(
e.filename)
elif e.errno == errno.EROFS:
raise api_errors.ReadOnlyFileSystemException(e.filename)
raise
except:
self.actuators.exec_fail_actuators(self.image)
raise
else:
self.actuators.exec_post_actuators(self.image)
self.state = EXECUTED_OK
# reduce memory consumption
del self.removal_actions
del self.update_actions
del self.install_actions
del self.target_rem_fmris
del self.target_fmris
del self.__directories
del self.actuators
# Perform the incremental update to the search indexes
# for all changed packages
if self.update_index:
plan_info = [
(p.destination_fmri, p.origin_fmri)
for p
in self.pkg_plans
]
del self.pkg_plans
self.progtrack.actions_set_goal(_("Index Phase"),
len(plan_info))
self.image.update_index_dir()
ind = indexer.Indexer(self.image,
self.image.get_manifest,
self.image.get_manifest_path,
progtrack=self.progtrack,
excludes=self.new_excludes)
try:
if empty_image:
ind.setup()
if empty_image or ind.check_index_existence():
ind.client_update_index((self.filters,
plan_info), self.image)
except KeyboardInterrupt:
raise
                        except se.ProblematicPermissionsIndexException, e:
# ProblematicPermissionsIndexException
# is included here as there's little
# chance that trying again will fix this
# problem.
raise api_errors.WrapIndexingException(e,
traceback.format_exc(),
traceback.format_stack())
except Exception, e:
# It's important to delete and rebuild
# from scratch rather than using the
# existing indexer because otherwise the
# state will become confused.
del(ind)
# XXX Once we have a framework for
# emitting a message to the user in this
# spot in the code, we should tell them
# something has gone wrong so that we
# continue to get feedback to allow
# us to debug the code.
try:
ind = indexer.Indexer(self.image,
self.image.get_manifest,
self.image.get_manifest_path,
progtrack=self.progtrack,
excludes=self.new_excludes)
ind.rebuild_index_from_scratch(
self.image.gen_installed_pkgs())
except Exception, e:
raise api_errors.WrapIndexingException(
e, traceback.format_exc(),
traceback.format_stack())
raise \
api_errors.WrapSuccessfulIndexingException(
e, traceback.format_exc(),
traceback.format_stack())
if self.preexecuted_indexing_error is not None:
raise self.preexecuted_indexing_error
def is_image_empty(self):
try:
self.image.gen_installed_pkg_names().next()
return False
except StopIteration:
return True
| agpl-3.0 | -8,538,805,666,062,762,000 | 45.057562 | 96 | 0.453329 | false |
vmthunder/nova | nova/tests/virt/xenapi/image/test_bittorrent.py | 12 | 5657 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import pkg_resources
import six
from nova import context
from nova.i18n import _
from nova import test
from nova.tests.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import bittorrent
from nova.virt.xenapi import vm_utils
class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(TestBittorrentStore, self).setUp()
self.store = bittorrent.BittorrentStore()
self.mox = mox.Mox()
self.flags(torrent_base_url='http://foo',
connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.context = context.RequestContext(
'user', 'project', auth_token='foobar')
fake.reset()
stubs.stubout_session(self.stubs, fake.SessionBase)
def mock_iter_eps(namespace):
return []
self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_eps)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.stubs.Set(
vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
def test_download_image(self):
params = {'image_id': 'fake_image_uuid',
'sr_path': '/fake/sr/path',
'torrent_download_stall_cutoff': 600,
'torrent_listen_port_end': 6891,
'torrent_listen_port_start': 6881,
'torrent_max_last_accessed': 86400,
'torrent_max_seeder_processes_per_host': 1,
'torrent_seed_chance': 1.0,
'torrent_seed_duration': 3600,
'torrent_url': 'http://foo/fake_image_uuid.torrent',
'uuid_stack': ['uuid1']}
self.stubs.Set(vm_utils, '_make_uuid_stack',
lambda *a, **kw: ['uuid1'])
self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
self.session.call_plugin_serialized(
'bittorrent', 'download_vhd', **params)
self.mox.ReplayAll()
self.store.download_image(self.context, self.session,
'fake_image_uuid')
self.mox.VerifyAll()
def test_upload_image(self):
self.assertRaises(NotImplementedError, self.store.upload_image,
self.context, self.session, mox.IgnoreArg, ['fake_vdi_uuid'],
'fake_image_uuid')
def bad_fetcher(image_id):
raise test.TestingException("just plain bad.")
def another_fetcher(image_id):
return "http://www.foobar.com/%s" % image_id
class MockEntryPoint(object):
name = "torrent_url"
def load(self):
return another_fetcher
class LookupTorrentURLTestCase(test.NoDBTestCase):
def setUp(self):
super(LookupTorrentURLTestCase, self).setUp()
self.store = bittorrent.BittorrentStore()
self.image_id = 'fakeimageid'
def _mock_iter_none(self, namespace):
return []
def _mock_iter_single(self, namespace):
return [MockEntryPoint()]
def test_default_fetch_url_no_base_url_set(self):
self.flags(torrent_base_url=None,
group='xenserver')
self.stubs.Set(pkg_resources, 'iter_entry_points',
self._mock_iter_none)
exc = self.assertRaises(
RuntimeError, self.store._lookup_torrent_url_fn)
self.assertEqual(_('Cannot create default bittorrent URL without'
' torrent_base_url set'
' or torrent URL fetcher extension'),
six.text_type(exc))
def test_default_fetch_url_base_url_is_set(self):
self.flags(torrent_base_url='http://foo',
group='xenserver')
self.stubs.Set(pkg_resources, 'iter_entry_points',
self._mock_iter_single)
lookup_fn = self.store._lookup_torrent_url_fn()
self.assertEqual('http://foo/fakeimageid.torrent',
lookup_fn(self.image_id))
def test_with_extension(self):
self.stubs.Set(pkg_resources, 'iter_entry_points',
self._mock_iter_single)
lookup_fn = self.store._lookup_torrent_url_fn()
self.assertEqual("http://www.foobar.com/%s" % self.image_id,
lookup_fn(self.image_id))
def test_multiple_extensions_found(self):
self.flags(torrent_base_url=None,
group='xenserver')
def mock_iter_multiple(namespace):
return [MockEntryPoint(), MockEntryPoint()]
self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_multiple)
exc = self.assertRaises(
RuntimeError, self.store._lookup_torrent_url_fn)
self.assertEqual(_('Multiple torrent URL fetcher extensions found.'
' Failing.'),
six.text_type(exc))
| apache-2.0 | 4,690,896,497,726,723,000 | 33.919753 | 78 | 0.599434 | false |
sivertkh/gtrackcore | gtrackcore/track_operations/benchmarking/UnionRuntime.py | 1 | 6134 |
from line_profiler import LineProfiler
import time
import timeit
import sys
import os
import glob
from collections import OrderedDict
from cStringIO import StringIO
from gtrackcore.track_operations.operations._Union import Union
from gtrackcore.track_operations.TrackContents import TrackContents
from gtrackcore.metadata import GenomeInfo
from gtrackcore.track.core.GenomeRegion import GenomeRegion
from gtrackcore.core.Api import importFile
from gtrackcore.core.Api import _trackNameExists
from gtrackcore.core.Api import listAvailableGenomes
from gtrackcore.core.Api import listAvailableTracks
from gtrackcore.track.core.Track import Track
from gtrackcore.track.format.TrackFormat import TrackFormatReq
from gtrackcore.track.format.TrackFormat import TrackFormat
class UnionBenchmark(object):
def __init__(self):
self.hg18 = list((GenomeRegion('hg18', c, 0, l)
for c, l in GenomeInfo.GENOMES['hg18'][
'size'].iteritems()))
self._importTrack()
#self.trackA = self._createTrackContent('h4k20me1', False)
#self.trackB = self._createTrackContent('h4k20me3', False)
#self.trackC = self._createTrackContent('lk-test1', False)
#self.trackD = self._createTrackContent('lk-test2', False)
#self.trackE = self._createTrackContent('wc-points-odd', False)
#self.trackF = self._createTrackContent('wc-points-even', False)
self.trackG = self._createTrackContent('p-odd-1456576672', False)
self.trackH = self._createTrackContent('p-even-1456662688', False)
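        # NOTE: runUnionS/runUnionLP/runUnionP below rely on the track
        # attributes commented out above (trackA..trackF); uncomment the
        # matching pair before invoking those benchmarks. Only trackG and
        # trackH (valued points) are loaded by default.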
def runUnionP(self):
resReq = TrackFormat([], None, None, None, None, None, None, None)
#resReq = TrackFormat(name='segments')
u = Union(self.trackE, self.trackF)
u.setResultTrackRequirements(resReq)
start = timeit.default_timer()
tc = u()
stop = timeit.default_timer()
print("Total runtime: Union of points: {0}".format(stop-start))
for r in tc.regions:
if r.chr == 'chr1':
print "======"
print len(tc.getTrackView(r).startsAsNumpyArray())
print tc.getTrackView(r).startsAsNumpyArray()[:100]
print "======"
def runUnionVP(self):
resReq = TrackFormat([], None, [], None, None, None, None, None)
#resReq = TrackFormat(name='segments')
u = Union(self.trackG, self.trackH)
u.setResultTrackRequirements(resReq)
start = timeit.default_timer()
tc = u()
stop = timeit.default_timer()
print("Total runtime: Union of valued points: {0}".format(stop-start))
for r in tc.regions:
if r.chr == 'chr1':
print "======"
print len(tc.getTrackView(r).startsAsNumpyArray())
print tc.getTrackView(r).startsAsNumpyArray()[:100]
print tc.getTrackView(r).valsAsNumpyArray()[:100]
print "======"
def runUnionLP(self):
resReq = TrackFormat([], None, None, None, None, [], None, None)
#resReq = TrackFormat(name='segments')
u = Union(self.trackC, self.trackD)
u.setResultTrackRequirements(resReq)
start = timeit.default_timer()
tc = u()
stop = timeit.default_timer()
print("Total runtime: Union of Linked points: {0}".format(stop-start))
def runUnionS(self):
resReq = TrackFormat([], [], None, None, None, None, None, None)
#resReq = TrackFormat(name='segments')
u = Union(self.trackA, self.trackB)
u.setResultTrackRequirements(resReq)
start = timeit.default_timer()
tc = u()
stop = timeit.default_timer()
print("Total runtime: Union of segments: {0}".format(stop-start))
"""
for r in tc.regions:
print "======"
print len(self.trackA.getTrackView(r).startsAsNumpyArray())
print len(self.trackB.getTrackView(r).startsAsNumpyArray())
print len(tc.getTrackView(r).startsAsNumpyArray())
print "======"
"""
#for t in tc.getTrackViews():
def _trackInGtrack(self, genome, trackName):
"""
Add this functionality to API..
"""
with Capturing() as output:
listAvailableTracks(genome)
for i in output:
if trackName in i:
return True
return False
def _createTrackContent(self, trackName, allowOverlaps):
trackName = trackName.split(':')
track = Track(trackName)
track.addFormatReq(TrackFormatReq(allowOverlaps=allowOverlaps,
borderHandling='crop'))
trackViewList = OrderedDict()
print self.hg18
print(type(self.hg18))
for region in self.hg18:
print(region)
print(type(region))
trackViewList[region] = track.getTrackView(region)
return TrackContents('hg18', trackViewList)
def _importTrack(self):
"""
Loads all gtrack files in the test_tracks folder into GTrackCore.
Will ignore tracks already in GTrackCore.
"""
genome = 'hg18'
for path in glob.glob("./test_tracks/*.gtrack"):
name = os.path.splitext(path.split('/')[-1])[0]
if not self._trackInGtrack(genome, name):
importFile(path, genome, name)
else:
print("Track already imported into gtrack")
class Capturing(list):
"""
Class used to capture the print output from the API. This should be
fixed by adding more functionality to the API.
From stackoverflow #16571150
"""
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
sys.stdout = self._stdout
if __name__ == '__main__':
a = UnionBenchmark()
#a.runUnionP()
a.runUnionVP()
#a.runUnionLP()
#a.runUnionS()
| gpl-3.0 | -621,302,844,013,529,500 | 28.63285 | 78 | 0.60613 | false |
Intel-Corporation/tensorflow | tensorflow/python/ops/ragged/ragged_gather_ops.py | 2 | 10731 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gather operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_ragged_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_tensor
#===============================================================================
# ragged_gather
#===============================================================================
# TODO(edloper): Add an `axis` argument
def gather(params, indices, validate_indices=None, axis=0, batch_dims=0,
name=None):
"""Gathers ragged slices from `params` axis `0` according to `indices`.
Returns `RaggedTensor` output, such that:
```python
output.shape = indices.shape + params.shape[1:]
output.ragged_rank = indices.shape.ndims + params.ragged_rank
output[i...j, d0...dn] = params[indices[i...j], d0...dn]
```
`params` may be ragged. `indices` may be ragged.
`indices` must have dtype `int32` or `int64`. If any index is out of bounds,
then an error is returned.
Examples:
```python
>>> params = tf.constant(['a', 'b', 'c', 'd', 'e'])
>>> indices = tf.constant([3, 1, 2, 1, 0])
>>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
>>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]])
>>> print ragged.gather(params, ragged_indices)
[['d', 'b', 'c'], ['b'], [], ['a']]
>>> print ragged.gather(ragged_params, indices)
[['e'], ['d'], [], ['d'], ['a', 'b', 'c']]
>>> print ragged.gather(ragged_params, ragged_indices)
[[['e'], ['d'], []], [['d']], [], [['a', 'b', 'c']]]
```
Args:
params: The potentially ragged tensor from which to gather values. Must be
at least rank 1.
indices: The potentially ragged tensor indicating which values to gather.
Must have dtype `int32` or `int64`. Values must be in the range `[0,
params.shape[0]]`.
validate_indices: Ignored.
axis: Must be zero.
batch_dims: Must be zero.
name: A name for the operation (optional).
Returns:
A `RaggedTensor`, where `output.dtype=params.dtype` and
`output.shape=indices.shape + params.shape[1:]` and
`output.ragged_rank=indices.shape.ndims + params.ragged_rank`.
Raises:
ValueError: If indices.shape.ndims is not known statically.
"""
del validate_indices
if not isinstance(axis, int) or axis != 0:
raise ValueError('axis != 0 is not supported for ragged gather yet.')
if not isinstance(batch_dims, int) or batch_dims != 0:
raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')
with ops.name_scope(name, 'RaggedGather', [params, indices]):
params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
params, name='params')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
if ragged_tensor.is_ragged(indices):
return indices.with_values(gather(params, indices.values))
if not ragged_tensor.is_ragged(params):
return array_ops.gather(params, indices)
indices = ops.convert_to_tensor(indices)
if indices.shape.ndims is None:
raise ValueError('indices.shape.ndims must be known statically')
result = gen_ragged_array_ops.ragged_gather(
indices=indices,
params_dense_values=params.flat_values,
params_nested_splits=params.nested_row_splits,
OUTPUT_RAGGED_RANK=indices.shape.ndims + len(params.nested_row_splits) -
1)
# Compose the RaggedTensor from splits & values.
return ragged_tensor.RaggedTensor.from_nested_row_splits(
result.output_dense_values, result.output_nested_splits)
#===============================================================================
# ragged.gather_nd
#===============================================================================
def gather_nd(params, indices, batch_dims=0, name=None):
"""Gather slices from `params` using `n`-dimensional indices.
This operation is similar to `gather`, but it uses the innermost dimension
of `indices` to define a slice into `params`. In particular, if:
* `indices` has shape `[A1...AN, I]`
* `params` has shape `[B1...BM]`
Then:
* `result` has shape `[A1...AN, B_{I+1}...BM]`.
* `result[a1...aN] = params[indices[a1...aN, :]]`
Args:
params: A potentially ragged tensor with shape `[A1...AN, I]`.
indices: A potentially ragged tensor with shape `[B1...BM]`.
batch_dims: Must be zero.
name: A name for the operation (optional).
Returns:
A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`.
#### Examples:
```python
>>> params = tf.ragged.constant_value(
... [ [ ['000', '001'], ['010' ] ],
... [ ['100' ], ['110', '111', '112'], ['120'] ],
... [ [ ], ['210' ] ] ])
>>> # Gather 2D slices from a 3D tensor
>>> ragged.gather_nd(params, [[2], [0]])
[ [ [ ], ['210'] ]
[ ['000', '001'], ['010'] ] ]
>>> # Gather 1D slices from a 3D tensor
>>> ragged.gather_nd(params, [[2, 1], [0, 0]])
[['210'], ['000', '001']]
>>> # Gather scalars from a 3D tensor
>>> ragged.gather_nd(params, [[0, 0, 1], [1, 1, 2]])
['001', '112']
```
"""
if not isinstance(batch_dims, int) or batch_dims != 0:
raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')
if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)):
return array_ops.gather_nd(params, indices, name)
with ops.name_scope(name, 'RaggedGatherNd', [params, indices]):
params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
params, name='params')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
indices_shape = indices.shape
indices_ndims = indices_shape.ndims
if indices_ndims is None:
      raise ValueError('indices.rank must be statically known.')
if indices_ndims == 0:
raise ValueError('indices.rank must be at least 1.')
if (ragged_tensor.is_ragged(indices) and
indices_ndims == indices.ragged_rank + 1):
raise ValueError('The innermost dimension of indices may not be ragged')
# `index_size` is the "n" in "gather_nd" -- i.e., the number of dimensions
# that each index slices into.
index_size = tensor_shape.dimension_value(indices_shape[-1])
if index_size is None:
raise ValueError('indices.shape[-1] must be statically known.')
# If `indices` has more than 2 dimensions, then recurse. If `indices` is
# dense, then we convert it to ragged before recursing, and then convert
# the result back to `dense` if appropriate.
if indices_ndims > 2:
indices_is_dense = not ragged_tensor.is_ragged(indices)
if indices_is_dense:
indices = ragged_conversion_ops.from_tensor(
indices, ragged_rank=indices_ndims - 2)
result = indices.with_flat_values(gather_nd(params, indices.flat_values))
if (indices_is_dense and ragged_tensor.is_ragged(result) and
result.ragged_rank == indices_ndims - 2):
result = ragged_conversion_ops.to_tensor(result)
return result
# indices_ndims <= 2, and the innermost dimension of indices may not be
# ragged, so `indices` must not be ragged.
assert not ragged_tensor.is_ragged(indices)
assert ragged_tensor.is_ragged(params)
# Handle corner case: An empty index tuple selects the entire `params`
# value. So if `index_size` is zero, then tile `params`.
if index_size == 0:
params_ndims = params.ragged_rank + array_ops.rank(params.flat_values)
for dim in range(indices_ndims - 1):
params = ragged_array_ops.expand_dims(params, axis=0)
multiples = array_ops.concat([
array_ops.shape(indices)[:-1],
array_ops.ones([params_ndims], dtypes.int32)
],
axis=0)
return ragged_array_ops.tile(params, multiples)
# When index_size=1, we can just flatten the index tuples and use gather.
elif index_size == 1:
flattened_index_tuples = array_ops.reshape(indices, [-1])
return gather(params, flattened_index_tuples)
# Otherwise, params is a RaggedTensor, and indices is a 1D or 2D Tensor.
# Flatten both the index tuples and the params, such that the flattened
# index tuples point to the correct values in the flattened params; and
# then use ragged.gather on the flattened index tuples & params.
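    # Worked miniature (illustrative, not from the original source):
    # params = [['a', 'b'], [], ['c']] has row_splits [0, 2, 2, 3]; for
    # indices = [[0, 1], [2, 0]], gathering row_splits at indices[..., 0]
    # gives [0, 2], and adding indices[..., 1] yields [1, 2] -- the
    # positions of 'b' and 'c' in params.values.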
else:
indices = math_ops.cast(indices, dtypes.int64)
# Flatten the outermost 2 dimensions of the index tuples & params.
flattened_index_tuples = array_ops.gather(params.row_splits,
indices[..., 0])
flattened_index_tuples += indices[..., 1]
flattened_params = params.values
# Flatten any remaining dimensions.
for dim in range(2, index_size):
if not ragged_tensor.is_ragged(flattened_params):
flattened_index_tuples = array_ops.expand_dims(
flattened_index_tuples, axis=1)
flattened_index_tuples = array_ops.concat(
[flattened_index_tuples, indices[..., dim:]], axis=1)
return array_ops.gather_nd(flattened_params, flattened_index_tuples)
flattened_index_tuples = array_ops.gather(
flattened_params.row_starts(), flattened_index_tuples)
flattened_index_tuples += indices[..., dim]
flattened_params = flattened_params.values
# Gather using the flattened index tuples and params.
return gather(flattened_params, flattened_index_tuples)
| apache-2.0 | 907,893,956,134,233,600 | 40.114943 | 80 | 0.625198 | false |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/distutils/util.py | 1 | 19647 | """distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id: util.py 4802 2007-01-23 21:26:03Z vapier $"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
def get_platform ():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
For non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
from distutils.sysconfig import get_config_vars
cfgvars = get_config_vars()
macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
f.close()
if m is not None:
macver = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if macver:
from distutils.sysconfig import get_config_vars
release = macver
osname = 'macosx'
platver = os.uname()[2]
osmajor = int(platver.split('.')[0])
if osmajor >= 8 and \
get_config_vars().get('UNIVERSALSDK', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
machine = 'fat'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return apply(os.path.join, paths)
# convert_path ()
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
if os.name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
elif os.name == 'nt':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
elif os.name == 'os2':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == os.sep:
path = path[1:]
return os.path.join(new_root, path)
elif os.name == 'mac':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
# Chop off volume name from start of path
elements = string.split(pathname, ":", 1)
pathname = ":" + elements[1]
return os.path.join(new_root, pathname)
else:
raise DistutilsPlatformError, \
"nothing known about platform '%s'" % os.name
_environ_checked = 0
def check_environ ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and not os.environ.has_key('HOME'):
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
if not os.environ.has_key('PLAT'):
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars (s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
occurrence of '$' followed by a name is considered a variable, and
variable is substituted by the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst (match, local_vars=local_vars):
var_name = match.group(1)
if local_vars.has_key(var_name):
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError, var:
raise ValueError, "invalid variable '$%s'" % var
# subst_vars ()
def grok_environment_error (exc, prefix="error: "):
"""Generate a useful error message from an EnvironmentError (IOError or
OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
does what it can to deal with exception objects that don't have a
filename (which happens when the error is due to a two-file operation,
    such as 'rename()' or 'link()').  Returns the error message as a string
prefixed with 'prefix'.
"""
# check for Python 1.5.2-style {IO,OS}Error exception objects
if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
if exc.filename:
error = prefix + "%s: %s" % (exc.filename, exc.strerror)
else:
# two-argument functions in posix module don't
# include the filename in the exception object!
error = prefix + "%s" % exc.strerror
else:
error = prefix + str(exc[-1])
return error
# Needed by 'split_quoted()'
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
global _wordchars_re, _squote_re, _dquote_re
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
# This is a nice algorithm for splitting up a single string, since it
# doesn't require character-by-character examination. It was a little
# bit of a brain-bender to get it working right, though...
if _wordchars_re is None: _init_regex()
s = string.strip(s)
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = string.lstrip(s[end:])
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError, \
"this can't happen (bad char '%c')" % s[end]
if m is None:
raise ValueError, \
"bad string (mismatched %s quotes?)" % s[end]
(beg, end) = m.span()
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
if pos >= len(s):
words.append(s)
break
return words
# split_quoted ()
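# Example behaviour (doctest-style, for illustration):
#   >>> split_quoted('this is "a test"')
#   ['this', 'is', 'a test']
#   >>> split_quoted(r"one\ word 'two words'")
#   ['one word', 'two words']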
def execute (func, args, msg=None, verbose=0, dry_run=0):
"""Perform some action that affects the outside world (eg. by
writing to the filesystem). Such actions are special because they
are disabled by the 'dry_run' flag. This method takes care of all
that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
"external action" being performed), and an optional message to
print.
"""
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
apply(func, args)
def strtobool (val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = string.lower(val)
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError, "invalid truth value %r" % (val,)
def byte_compile (py_files,
optimize=0, force=0,
prefix=None, base_dir=None,
verbose=1, dry_run=0,
direct=None):
"""Byte-compile a collection of Python source files to either .pyc
or .pyo files in the same directory. 'py_files' is a list of files
to compile; any files that don't end in ".py" are silently skipped.
'optimize' must be one of the following:
0 - don't optimize (generate .pyc)
1 - normal optimization (like "python -O")
2 - extra optimization (like "python -OO")
If 'force' is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in 'py_files'; you can modify these with 'prefix' and
'basedir'. 'prefix' is a string that will be stripped off of each
source filename, and 'base_dir' is a directory name that will be
prepended (after 'prefix' is stripped). You can supply either or both
(or neither) of 'prefix' and 'base_dir', as you wish.
If 'dry_run' is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard py_compile module, or indirectly by writing a
temporary script and executing it. Normally, you should let
'byte_compile()' figure out to use direct compilation or not (see
the source for details). The 'direct' flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to None.
"""
# First, if the caller didn't force us into direct or indirect mode,
# figure out which mode we should be in. We take a conservative
# approach: choose direct mode *only* if the current interpreter is
# in debug mode and optimize is 0. If we're not in debug mode (-O
# or -OO), we don't know which level of optimization this
# interpreter is running with, so we can't do direct
# byte-compilation and be certain that it's the right thing. Thus,
# always compile indirectly if the current interpreter is in either
# optimize mode, or if either optimization level was requested by
# the caller.
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
try:
from tempfile import mkstemp
(script_fd, script_name) = mkstemp(".py")
except ImportError:
from tempfile import mktemp
(script_fd, script_name) = None, mktemp(".py")
log.info("writing byte-compilation script '%s'", script_name)
if not dry_run:
if script_fd is not None:
script = os.fdopen(script_fd, "w")
else:
script = open(script_name, "w")
script.write("""\
from distutils.util import byte_compile
files = [
""")
# XXX would be nice to write absolute filenames, just for
# safety's sake (script should be more robust in the face of
# chdir'ing before running it). But this requires abspath'ing
# 'prefix' as well, and that breaks the hack in build_lib's
# 'byte_compile()' method that carefully tacks on a trailing
# slash (os.sep really) to make sure the prefix here is "just
# right". This whole prefix business is rather delicate -- the
# problem is that it's really a directory, but I'm treating it
# as a dumb string, so trailing slashes and so forth matter.
#py_files = map(os.path.abspath, py_files)
#if prefix:
# prefix = os.path.abspath(prefix)
script.write(string.join(map(repr, py_files), ",\n") + "]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
prefix=%r, base_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
dry_run=dry_run)
# "Direct" byte-compilation: use the py_compile module to compile
# right here, right now. Note that the script generated in indirect
# mode simply calls 'byte_compile()' in direct mode, a weird sort of
# cross-process recursion. Hey, it works!
else:
from py_compile import compile
for file in py_files:
if file[-3:] != ".py":
# This lets us be lazy and not filter filenames in
# the "install_lib" command.
continue
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
cfile = file + (__debug__ and "c" or "o")
dfile = file
if prefix:
if file[:len(prefix)] != prefix:
raise ValueError, \
("invalid prefix: filename %r doesn't start with %r"
% (file, prefix))
dfile = dfile[len(prefix):]
if base_dir:
dfile = os.path.join(base_dir, dfile)
cfile_base = os.path.basename(cfile)
if direct:
if force or newer(file, cfile):
log.info("byte-compiling %s to %s", file, cfile_base)
if not dry_run:
compile(file, cfile, dfile)
else:
log.debug("skipping byte-compilation of %s to %s",
file, cfile_base)
# byte_compile ()
def rfc822_escape (header):
"""Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
"""
lines = string.split(header, '\n')
lines = map(string.strip, lines)
header = string.join(lines, '\n' + 8*' ')
return header
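# Example behaviour (illustrative):
#   >>> rfc822_escape('Summary line\nSecond line')
#   'Summary line\n        Second line'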
| gpl-2.0 | 8,010,245,731,755,182,000 | 37.223735 | 78 | 0.586044 | false |
ThomasYeoLab/CBIG | stable_projects/fMRI_dynamics/Kong2021_pMFM/examples/scripts/CBIG_pMFM_basic_functions_example.py | 1 | 31953 | # /usr/bin/env python
'''
Written by Kong Xiaolu and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
'''
import csv
import math
import time
import numpy as np
import torch
import scipy.io as sio
'''********** Functions for computing simulated BOLD signals ***********'''
def CBIG_mfm_multi_simulation(parameter, sc_mat, t_epochlong, n_dup):
'''
Function used to generate the simulated BOLD signal using mean field model
and hemodynamic model
    Each parameter set is simulated multiple times to get a stable
    result
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        sc_mat:     N*N structural connectivity matrix
        t_epochlong: total simulated time in minutes
        n_dup:      number of times each parameter set is simulated
    Returns:
        bold_d:     simulated BOLD signal
    '''
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Initializing system parameters
kstart = 0.
t_pre = 60 * 2
kend = t_pre + 60 * t_epochlong
d_t = 0.01
t_bold = 0.72
# Setting sampling ratio
k_p = torch.arange(kstart, kend + d_t, d_t)
n_num = parameter.shape[1]
n_set = n_dup * n_num
parameter = parameter.repeat(1, n_dup)
n_nodes = sc_mat.shape[0]
n_samples = k_p.shape[0]
# Initializing neural activity
y_t = torch.zeros((n_nodes, n_set))
d_y = torch.zeros((n_nodes, n_set))
# Initializing hemodynamic activity
f_mat = torch.ones((n_nodes, n_set, 4))
z_t = torch.zeros((n_nodes, n_set))
f_t = torch.ones((n_nodes, n_set))
v_t = torch.ones((n_nodes, n_set))
q_t = torch.ones((n_nodes, n_set))
f_mat[:, :, 0] = z_t
y_t[:, :] = 0.001
# Wiener process
w_coef = parameter[2 * n_nodes + 1:3 * n_nodes + 1, :] / math.sqrt(0.001)
if w_coef.shape[0] == 1:
w_coef = w_coef.repeat(n_nodes, 1)
w_l = k_p.shape[0]
d_w = math.sqrt(d_t) * torch.randn(n_dup, n_nodes, w_l + 1000)
    p_constant = 0.34
    v_0 = 0.02
    k_1 = 4.3 * 28.265 * 3 * 0.0331 * p_constant
    k_2 = 0.47 * 110 * 0.0331 * p_constant
k_3 = 0.53
count = 0
y_bold = torch.zeros((n_nodes, n_set, int(n_samples / (t_bold / d_t) + 1)))
# Warm up
start = time.time()
for i in range(1000):
d_y = CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
noise_level = d_w[:, :, i].repeat(1, 1, n_num).contiguous().view(
-1, n_nodes)
y_t = y_t + d_y * d_t + w_coef * torch.transpose(noise_level, 0, 1)
# Main body: calculation
for i in range(n_samples):
d_y = CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
noise_level = d_w[:, :, i + 1000].repeat(1, 1,
n_num).contiguous().view(
-1, n_nodes)
y_t = y_t + d_y * d_t + w_coef * torch.transpose(noise_level, 0, 1)
d_f = CBIG_mfm_rfMRI_BW_ode(y_t, f_mat)
f_mat = f_mat + d_f * d_t
z_t, f_t, v_t, q_t = torch.chunk(f_mat, 4, dim=2)
        y_bold_temp = 100 / p_constant * v_0 * \
            (k_1 * (1 - q_t) + k_2 * (1 - q_t / v_t) + k_3 * (1 - v_t))
y_bold[:, :, count] = y_bold_temp[:, :, 0]
count = count + ((i + 1) % (t_bold / d_t) == 0) * 1
elapsed = time.time() - start
print('The time used for calculating simulated BOLD signal is: ', elapsed)
# Downsampling
cut_index = int(t_pre / t_bold)
bold_d = y_bold[:, :, cut_index + 1:y_bold.shape[2]]
return bold_d
def CBIG_mfm_single_simulation(parameter, sc_mat, t_epochlong, d_t=0.01):
'''
Function used to generate the simulated BOLD signal using mean field model
and hemodynamic model
    Each parameter set is simulated one time
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        sc_mat:     N*N structural connectivity matrix
        t_epochlong: total simulated time in minutes
        d_t:        integration time step in seconds (default 0.01)
    Returns:
        bold_d:     simulated BOLD signal
    '''
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Initializing system parameters
kstart = 0.
t_pre = 60 * 2
kend = t_pre + 60 * t_epochlong
t_bold = 0.72
# sampling ratio
k_p = torch.arange(kstart, kend + d_t, d_t)
n_nodes = sc_mat.shape[0]
n_samples = k_p.shape[0]
n_set = parameter.shape[1]
# Initializing neural activity
y_t = torch.zeros((n_nodes, n_set))
d_y = torch.zeros((n_nodes, n_set))
# Initializing hemodynamic activity
f_mat = torch.ones((n_nodes, n_set, 4))
z_t = torch.zeros((n_nodes, n_set))
f_t = torch.ones((n_nodes, n_set))
v_t = torch.ones((n_nodes, n_set))
q_t = torch.ones((n_nodes, n_set))
f_mat[:, :, 0] = z_t
y_t[:, :] = 0.001
# Wiener process
w_coef = parameter[2 * n_nodes + 1:3 * n_nodes + 1, :] / math.sqrt(0.001)
if w_coef.shape[0] == 1:
w_coef = w_coef.repeat(n_nodes, 1)
    p_constant = 0.34
    v_0 = 0.02
    k_1 = 4.3 * 28.265 * 3 * 0.0331 * p_constant
    k_2 = 0.47 * 110 * 0.0331 * p_constant
k_3 = 0.53
count = 0
y_bold = torch.zeros((n_nodes, n_set, int(n_samples / (t_bold / d_t) + 1)))
# Warm up
start = time.time()
for i in range(1000):
d_y = CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
y_t = y_t + d_y * d_t + w_coef * \
torch.randn(n_nodes, n_set) * math.sqrt(d_t)
# Main body: calculation
for i in range(n_samples):
d_y = CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat)
random_num = torch.randn(n_nodes, n_set)
y_t = y_t + d_y * d_t + w_coef * random_num * math.sqrt(d_t)
d_f = CBIG_mfm_rfMRI_BW_ode(y_t, f_mat)
f_mat = f_mat + d_f * d_t
z_t, f_t, v_t, q_t = torch.chunk(f_mat, 4, dim=2)
        y_bold_temp = 100 / p_constant * v_0 * \
            (k_1 * (1 - q_t) + k_2 * (1 - q_t / v_t) + k_3 * (1 - v_t))
y_bold[:, :, count] = y_bold_temp[:, :, 0]
count = count + ((i + 1) % (t_bold / d_t) == 0) * 1
elapsed = time.time() - start
print('The time used for calculating simulated BOLD signal is: ', elapsed)
# Downsampling
cut_index = int(t_pre / t_bold)
bold_d = y_bold[:, :, cut_index + 1:y_bold.shape[2]]
return bold_d
def CBIG_mfm_rfMRI_ode(y_t, parameter, sc_mat):
'''
    This function calculates the derivatives of the synaptic gating variable S
    Args:
        y_t:        N*M matrix of synaptic gating variables
                    N is the number of ROI
                    M is the number of candidate parameter sets
        parameter:  (N*3+1)*M matrix.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        sc_mat:     N*N structural connectivity matrix
    Returns:
        dy:         N*M matrix of derivatives of the synaptic gating
                    variable S
    '''
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Parameters for inputs and couplings
number_roi = sc_mat.shape[0]
J = 0.2609
w = parameter[0:number_roi, :]
G = parameter[2 * number_roi, :]
I0 = parameter[number_roi:2 * number_roi, :]
# Parameters for firing rate
a = 270
b = 108
d = 0.154
# Parameters for synaptic activity/currents
tau_s = 0.1
gamma_s = 0.641
# Total input x
x = J * w * y_t + J * G.repeat(number_roi, 1) * torch.mm(sc_mat, y_t) + I0
# Population firing rate
H = (a * x - b) / (1 - torch.exp(-d * (a * x - b)))
# Synaptic activity
dy = -1 / tau_s * y_t + gamma_s * (1 - y_t) * H
return dy
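# For reference, a sketch of the dynamics computed by CBIG_mfm_rfMRI_ode,
# written out in LaTeX-style notation (the reduced Wong-Wang mean field form):
#   x_i     = J * w_i * S_i + J * G * \sum_j SC_{ij} S_j + I_i
#   H(x)    = (a x - b) / (1 - \exp(-d (a x - b)))
#   dS_i/dt = -S_i / \tau_s + \gamma_s (1 - S_i) H(x_i)
# with a = 270, b = 108, d = 0.154, \tau_s = 0.1, \gamma_s = 0.641, J = 0.2609,
# matching the constants set inside the function.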
def CBIG_mfm_rfMRI_BW_ode(y_t, F):
'''
    This function implements the hemodynamic (Balloon-Windkessel) model
    Args:
        y_t:        N*M matrix of synaptic gating variables
                    N is the number of ROI
                    M is the number of candidate parameter sets
        F:          hemodynamic state variables
    Returns:
        dF:         derivatives of the hemodynamic state variables
    '''
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Hemodynamic model parameters
beta = 0.65
gamma = 0.41
tau = 0.98
alpha = 0.33
p_constant = 0.34
n_nodes = y_t.shape[0]
n_set = y_t.shape[1]
# Calculate derivatives
dF = torch.zeros((n_nodes, n_set, 4))
dF[:, :, 0] = y_t - beta * F[:, :, 0] - gamma * (F[:, :, 1] - 1)
dF[:, :, 1] = F[:, :, 0]
dF[:, :, 2] = 1 / tau * (F[:, :, 1] - F[:, :, 2]**(1 / alpha))
dF[:, :, 3] = 1 / tau * (F[:, :, 1] / p_constant * (1 - (1 - p_constant)**
(1 / F[:, :, 1])) -
F[:, :, 3] / F[:, :, 2] * F[:, :, 2]**(1 / alpha))
return dF
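# For reference, a sketch of the hemodynamic equations implemented above,
# with S the synaptic activity and rho = p_constant:
#   dz/dt = S - beta * z - gamma * (f - 1)
#   df/dt = z
#   dv/dt = (f - v^{1/alpha}) / tau
#   dq/dt = (f / rho * (1 - (1 - rho)^{1/f}) - q * v^{1/alpha - 1}) / tau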
def CBIG_FCcorrelation_multi_simulation(emp_fc, bold_d, n_dup):
'''
    This function calculates the FC correlation cost for multiple
    simulated BOLD signal results
    Args:
        emp_fc:     N*N group level FC matrix
                    N is the number of ROI
        bold_d:     simulated BOLD signal
        n_dup:      number of times each parameter set is simulated
    Returns:
        corr_cost:  FC correlation cost
    '''
fc_timestart = time.time()
# Calculate vectored simulated FC
n_set = bold_d.shape[1]
n_num = int(n_set / n_dup)
n_nodes = emp_fc.shape[0]
fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1
vect_len = int(n_nodes * (n_nodes - 1) / 2)
sim_fc_vector = torch.zeros(n_set, vect_len)
for i in range(n_set):
sim_fc = torch_corr(bold_d[:, i, :])
sim_fc_vector[i, :] = sim_fc[fc_mask]
# Average the simulated FCs with same parameter set
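    # (NaN != NaN, so the next line zeroes out NaN entries from failed runs)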
sim_fc_vector[sim_fc_vector != sim_fc_vector] = 0
sim_fc_num = torch.zeros(n_num, vect_len)
sim_fc_den = torch.zeros(n_num, 1)
for k in range(n_dup):
sim_fc_num = sim_fc_num + sim_fc_vector[k * n_num:(k + 1) * n_num, :]
sim_fc_den = sim_fc_den + \
(sim_fc_vector[k * n_num:(k + 1) * n_num, 0:1] != 0).float()
sim_fc_den[sim_fc_den == 0] = np.nan
sim_fc_ave = sim_fc_num / sim_fc_den
# Calculate FC correlation
emp_fcm = emp_fc[fc_mask].repeat(n_num, 1)
corr_mass = torch_corr2(torch_arctanh(sim_fc_ave), torch_arctanh(emp_fcm))
corr_cost = torch.diag(corr_mass)
corr_cost = corr_cost.cpu().numpy()
corr_cost = 1 - corr_cost
corr_cost[np.isnan(corr_cost)] = 10
fc_elapsed = time.time() - fc_timestart
    print('Time used for calculating FC correlation cost: ', fc_elapsed)
return corr_cost
def CBIG_FCcorrelation_single_simulation(emp_fc, bold_d, n_dup):
'''
    This function calculates the FC correlation cost for a single
    simulated BOLD signal result
    Args:
        emp_fc:     N*N group level FC matrix
                    N is the number of ROI
        bold_d:     simulated BOLD signal
        n_dup:      number of simulations averaged per parameter set
    Returns:
        corr_cost:  FC correlation cost
    '''
fc_timestart = time.time()
# Calculate vectored simulated FC
n_set = bold_d.shape[1]
n_nodes = emp_fc.shape[0]
fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1
vect_len = int(n_nodes * (n_nodes - 1) / 2)
sim_fc_vector = torch.zeros(n_set, vect_len)
for i in range(n_set):
sim_fc = torch_corr(bold_d[:, i, :])
sim_fc_vector[i, :] = sim_fc[fc_mask]
# Calculate FC correlation
sim_fc_numpy = sim_fc_vector.cpu().numpy()
emp_fc_numpy = emp_fc[fc_mask].cpu().numpy()
time_dup = int(n_set / n_dup)
corr_cost = np.zeros(time_dup)
for t in range(time_dup):
sim_fc_numpy_temp = sim_fc_numpy[t * n_dup:(t + 1) * n_dup, :]
sim_fc_mean = np.nanmean(sim_fc_numpy_temp, 0)
corrmean_temp = np.corrcoef(
np.arctanh(sim_fc_mean), np.arctanh(emp_fc_numpy))
corr_cost[t] = 1 - corrmean_temp[1, 0]
fc_elapsed = time.time() - fc_timestart
    print('Time used for calculating FC correlation cost: ', fc_elapsed)
return corr_cost
def CBIG_FCDKSstat_multi_simulation(emp_ks, bold_d, n_dup):
'''
    This function calculates the FCD KS statistics cost for multiple
    simulated BOLD signal results
    Args:
        emp_ks:     group level KS statistics for empirical data
        bold_d:     simulated BOLD signal
        n_dup:      number of times each parameter set is simulated
    Returns:
        ks_cost:    FCD KS statistics cost
    '''
fcd_timestart = time.time()
# Initializing the FC and FCD masks
n_set = bold_d.shape[1]
n_num = int(n_set / n_dup)
n_nodes = bold_d.shape[0]
window_size = 83
time_lengh = 1200 - window_size + 1
sub_num = 10
resid_num = n_set % sub_num
fc_edgenum = int(n_nodes * (n_nodes - 1) / 2)
fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1
fc_maskm = torch.zeros(n_nodes * sub_num,
n_nodes * sub_num).type(torch.cuda.ByteTensor)
for i in range(sub_num):
fc_maskm[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *
(i + 1)] = fc_mask
fc_mask_resid = torch.zeros(n_nodes * resid_num, n_nodes * resid_num).type(
torch.cuda.ByteTensor)
for i in range(resid_num):
fc_mask_resid[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *
(i + 1)] = fc_mask
fcd_mask = torch.triu(torch.ones(time_lengh, time_lengh), 1) == 1
    # Calculating CDF for simulated FCD matrices
fcd_hist = torch.ones(10000, n_set).cpu()
fc_mat = torch.zeros(fc_edgenum, sub_num, time_lengh)
batch_num = math.floor(n_set / sub_num)
fc_resid = torch.zeros(fc_edgenum, resid_num, time_lengh)
for b in range(batch_num):
bold_temp = bold_d[:, b * sub_num:(b + 1) * sub_num, :]
bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)
for i in range(0, time_lengh):
bold_fc = torch_corr(bold_tempm[:, i:i + window_size])
cor_temp = bold_fc[fc_maskm]
fc_mat[:, :, i] = torch.transpose(
cor_temp.view(sub_num, fc_edgenum), 0, 1)
for j in range(0, sub_num):
fcd_temp = torch_corr(torch.transpose(fc_mat[:, j, :], 0, 1))
fcd_hist[:, j + b * sub_num] = torch.histc(
fcd_temp[fcd_mask].cpu(), 10000, (0.0001 - 1), 1)
if resid_num != 0:
bold_temp = bold_d[:, batch_num * sub_num:n_set, :]
bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)
for i in range(time_lengh):
bold_fc = torch_corr(bold_tempm[:, i:i + window_size])
cor_temp = bold_fc[fc_mask_resid]
fc_resid[:, :, i] = torch.transpose(
cor_temp.view(resid_num, fc_edgenum), 0, 1)
for j in range(resid_num):
fcd_temp = torch_corr(torch.transpose(fc_resid[:, j, :], 0, 1))
fcd_hist[:, j + sub_num * batch_num] = torch.histc(
fcd_temp[fcd_mask].cpu(), 10000, (0.0001 - 1), 1)
fcd_histcum = np.cumsum(fcd_hist.numpy(), 0)
fcd_histcumM = fcd_histcum.copy()
fcd_histcumM[:, fcd_histcum[-1, :] != emp_ks[-1, 0]] = 0
# Calculating KS statistics cost
fcd_histcum_temp = np.zeros((10000, n_num))
fcd_histcum_num = np.zeros((1, n_num))
for k in range(n_dup):
fcd_histcum_temp = fcd_histcum_temp + \
fcd_histcumM[:, k * n_num:(k + 1) * n_num]
fcd_histcum_num = fcd_histcum_num + \
(fcd_histcumM[-1, k * n_num:(k + 1) * n_num] == emp_ks[-1, 0])
fcd_histcum_ave = fcd_histcum_temp / fcd_histcum_num
ks_diff = np.abs(fcd_histcum_ave - np.tile(emp_ks, [1, n_num]))
ks_cost = ks_diff.max(0) / emp_ks[-1, 0]
ks_cost[fcd_histcum_ave[-1, :] != emp_ks[-1, 0]] = 10
fcd_elapsed = time.time() - fcd_timestart
    print('Time used for calculating FCD KS statistics cost: ', fcd_elapsed)
return ks_cost
def CBIG_FCDKSstat_single_simulation(emp_ks, bold_d, n_dup, window_size=83):
'''
    This function calculates the FCD KS statistics cost for a single
    simulated BOLD signal result
    Args:
        emp_ks:     group level KS statistics for empirical data
        bold_d:     simulated BOLD signal
        n_dup:      number of simulations averaged per parameter set
        window_size: sliding window length in TRs (default 83)
    Returns:
        ks_cost:    FCD KS statistics cost
    '''
fcd_timestart = time.time()
# Initializing the FC and FCD masks
n_set = bold_d.shape[1]
n_nodes = bold_d.shape[0]
time_lengh = 1200 - window_size + 1
sub_num = 10
resid_num = n_set % sub_num
fc_edgenum = int(n_nodes * (n_nodes - 1) / 2)
fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1
fc_maskm = torch.zeros(n_nodes * sub_num,
n_nodes * sub_num).type(torch.cuda.ByteTensor)
for i in range(sub_num):
fc_maskm[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *
(i + 1)] = fc_mask
fc_mask_resid = torch.zeros(n_nodes * resid_num, n_nodes * resid_num).type(
torch.cuda.ByteTensor)
for i in range(resid_num):
fc_mask_resid[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *
(i + 1)] = fc_mask
fcd_mask = torch.triu(torch.ones(time_lengh, time_lengh), 1) == 1
    # Calculating CDF for simulated FCD matrices
fcd_hist = torch.ones(10000, n_set).cpu()
fc_mat = torch.zeros(fc_edgenum, sub_num, time_lengh)
batch_num = int(n_set / sub_num)
fc_resid = torch.zeros(fc_edgenum, resid_num, time_lengh)
for b in range(batch_num):
bold_temp = bold_d[:, b * sub_num:(b + 1) * sub_num, :]
bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)
for i in range(0, time_lengh):
bold_fc = torch_corr(bold_tempm[:, i:i + window_size])
cor_temp = bold_fc[fc_maskm]
fc_mat[:, :, i] = torch.transpose(
cor_temp.view(sub_num, fc_edgenum), 0, 1)
for j in range(0, sub_num):
fcd_temp = torch_corr(torch.transpose(fc_mat[:, j, :], 0, 1))
fcd_hist[:, j + b * sub_num] = torch.histc(
fcd_temp[fcd_mask].cpu(), 10000, (0.0001 - 1), 1)
if resid_num != 0:
bold_temp = bold_d[:, batch_num * sub_num:n_set, :]
bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)
for i in range(time_lengh):
bold_fc = torch_corr(bold_tempm[:, i:i + window_size])
cor_temp = bold_fc[fc_mask_resid]
fc_resid[:, :, i] = torch.transpose(
cor_temp.view(resid_num, fc_edgenum), 0, 1)
for j in range(resid_num):
fcd_temp = torch_corr(torch.transpose(fc_resid[:, j, :], 0, 1))
fcd_hist[:, j + sub_num * batch_num] = torch.histc(
fcd_temp[fcd_mask].cpu(), 10000, (0.0001 - 1), 1)
fcd_histcum = np.cumsum(fcd_hist.numpy(), 0)
# Calculating KS statistics cost
time_dup = int(n_set / n_dup)
ks_cost = np.zeros(time_dup)
for t in range(time_dup):
fcd_hist_temp = fcd_histcum[:, t * n_dup:(t + 1) * n_dup]
fcd_histcum_nn = fcd_hist_temp[:, fcd_hist_temp[-1, :] ==
emp_ks[-1, 0]]
fcd_hist_mean = np.mean(fcd_histcum_nn, 1)
ks_cost[t] = np.max(
np.abs(fcd_hist_mean - emp_ks[:, 0]) / emp_ks[-1, 0])
fcd_elapsed = time.time() - fcd_timestart
    print('Time used for calculating FCD KS statistics cost: ', fcd_elapsed)
return ks_cost
def torch_corr(A):
'''
Self implemented correlation function used for GPU
'''
Amean = torch.mean(A, 1)
Ax = A - torch.transpose(Amean.repeat(A.shape[1], 1), 0, 1)
Astd = torch.mean(Ax**2, 1)
Amm = torch.mm(Ax, torch.transpose(Ax, 0, 1)) / A.shape[1]
Aout = torch.sqrt(torch.ger(Astd, Astd))
Acor = Amm / Aout
return Acor
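# Sanity check (illustrative): for a 2-D tensor A whose rows are variables and
# whose columns are observations, torch_corr(A) matches np.corrcoef up to
# float32 precision:
#   A = torch.rand(4, 100)
#   np.allclose(torch_corr(A).cpu().numpy(),
#               np.corrcoef(A.cpu().numpy()), atol=1e-5)  # -> True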
def torch_corr2(A, B):
'''
Self implemented correlation function used for GPU
'''
Amean = torch.mean(A, 1)
Ax = A - torch.transpose(Amean.repeat(A.shape[1], 1), 0, 1)
Astd = torch.mean(Ax**2, 1)
Bmean = torch.mean(B, 1)
Bx = B - torch.transpose(Bmean.repeat(B.shape[1], 1), 0, 1)
Bstd = torch.mean(Bx**2, 1)
numerator = torch.mm(Ax, torch.transpose(Bx, 0, 1)) / A.shape[1]
denominator = torch.sqrt(torch.ger(Astd, Bstd))
torch_cor = numerator / denominator
return torch_cor
def torch_arctanh(A):
arctanh = 0.5 * torch.log((1 + A) / (1 - A))
return arctanh
'''********* Functions for computing FC & FCD costs ****************'''
def CBIG_combined_cost_train(parameter, n_dup):
'''
    This function calculates the combined FC correlation and FCD KS
    statistics cost for input parameter sets based on the training data
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        n_dup:      number of times each parameter set is simulated
    Returns:
        total_cost: summation of FC correlation cost and FCD KS statistics cost
        fc_cost:    FC correlation cost
        fcd_cost:   FCD KS statistics cost
    '''
# Loading training data
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
emp_fcd = sio.loadmat('../input/fcd_train.mat')
emp_fcd = np.array(emp_fcd['train_aveM'])
sc_mat_raw = csv_matrix_read('../input/sc_train.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
emp_fc = csv_matrix_read('../input/fc_train.csv')
emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()
    # Calculating simulated BOLD signal using MFM
bold_d = CBIG_mfm_multi_simulation(parameter, sc_mat, 14.4, n_dup)
# Calculating FC correlation cost
fc_cost = CBIG_FCcorrelation_multi_simulation(emp_fc, bold_d, n_dup)
# Calculating FCD KS statistics cost
fcd_cost = CBIG_FCDKSstat_multi_simulation(emp_fcd, bold_d, n_dup)
# Calculating total cost
total_cost = fc_cost + fcd_cost
return total_cost, fc_cost, fcd_cost
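# A hypothetical call (the 68 ROIs here are an assumption, giving a
# 3*68+1 = 205 row parameter matrix; the input file paths are read inside
# the function itself):
#   candidates = np.random.rand(205, 10)       # 10 candidate parameter sets
#   total, fc, fcd = CBIG_combined_cost_train(candidates, n_dup=5)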
def CBIG_combined_cost_validation(parameter, n_dup):
'''
    This function calculates the combined FC correlation and FCD KS
    statistics cost for input parameter sets based on the validation data
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        n_dup:      number of times each parameter set is simulated
    Returns:
        total_cost: summation of FC correlation cost and FCD KS statistics cost
        fc_cost:    FC correlation cost
        fcd_cost:   FCD KS statistics cost
    '''
# Loading validation data
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
emp_fcd = sio.loadmat('../input/fcd_vali.mat')
emp_fcd = np.array(emp_fcd['vali_aveM'])
sc_mat_raw = csv_matrix_read('../input/sc_vali.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
emp_fc = csv_matrix_read('../input/fc_vali.csv')
emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()
    # Calculating simulated BOLD signal using MFM
bold_d = CBIG_mfm_multi_simulation(parameter, sc_mat, 14.4, n_dup)
# Calculating FC correlation cost
fc_cost = CBIG_FCcorrelation_multi_simulation(emp_fc, bold_d, n_dup)
# Calculating FCD KS statistics cost
fcd_cost = CBIG_FCDKSstat_multi_simulation(emp_fcd, bold_d, n_dup)
# Calculating total cost
total_cost = fc_cost + fcd_cost
return total_cost, fc_cost, fcd_cost
def CBIG_combined_cost_test(parameter, n_dup):
'''
    This function calculates the combined FC correlation and FCD KS
    statistics cost for input parameter sets based on the test data
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        n_dup:      number of times each parameter set is simulated
    Returns:
        total_cost: summation of FC correlation cost and FCD KS statistics cost
        fc_cost:    FC correlation cost
        fcd_cost:   FCD KS statistics cost
    '''
    # Loading test data
parameter = np.tile(parameter, [1, n_dup])
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
emp_fcd = sio.loadmat('../input/fcd_test.mat')
emp_fcd = np.array(emp_fcd['test_aveM'])
sc_mat_raw = csv_matrix_read('../input/sc_test.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
emp_fc = csv_matrix_read('../input/fc_test.csv')
emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()
    # Calculating simulated BOLD signal using MFM
bold_d = CBIG_mfm_single_simulation(parameter, sc_mat, 14.4)
# Calculating FC correlation cost
fc_cost = CBIG_FCcorrelation_single_simulation(emp_fc, bold_d, n_dup)
# Calculating FCD KS statistics cost
fcd_cost = CBIG_FCDKSstat_single_simulation(emp_fcd, bold_d, n_dup)
# Calculating total cost
total_cost = fc_cost + fcd_cost
return total_cost, fc_cost, fcd_cost
def CBIG_combined_cost_test_highres(parameter, n_dup):
'''
    This function calculates the combined FC correlation and FCD KS
    statistics cost for input parameter sets based on the test data,
    using a finer integration time step (d_t = 0.001)
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        n_dup:      number of times each parameter set is simulated
    Returns:
        total_cost: summation of FC correlation cost and FCD KS statistics cost
        fc_cost:    FC correlation cost
        fcd_cost:   FCD KS statistics cost
    '''
    # Loading test data
parameter = np.tile(parameter, [1, n_dup])
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
emp_fcd = sio.loadmat('../input/fcd_test.mat')
emp_fcd = np.array(emp_fcd['test_aveM'])
sc_mat_raw = csv_matrix_read('../input/sc_test.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
emp_fc = csv_matrix_read('../input/fc_test.csv')
emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()
    # Calculating simulated BOLD signal using MFM
bold_d = CBIG_mfm_single_simulation(parameter, sc_mat, 14.4, d_t=0.001)
# Calculating FC correlation cost
fc_cost = CBIG_FCcorrelation_single_simulation(emp_fc, bold_d, n_dup)
# Calculating FCD KS statistics cost
fcd_cost = CBIG_FCDKSstat_single_simulation(emp_fcd, bold_d, n_dup)
# Calculating total cost
total_cost = fc_cost + fcd_cost
return total_cost, fc_cost, fcd_cost
def CBIG_combined_cost_test_differwin(parameter, n_dup, window_indi):
'''
    This function calculates the combined FC correlation and FCD KS
    statistics cost for input parameter sets based on the test data,
    using a non-default sliding window size for the FCD cost
    Args:
        parameter:  (N*3+1)*M matrix.
                    N is the number of ROI
                    M is the number of candidate parameter sets.
                    Each column of the matrix presents a parameter set, where:
                    parameter[0:N]: recurrent strength w
                    parameter[N:2*N]: external input I
                    parameter[2*N]: global constant G
                    parameter[2*N+1:3*N+1]: noise amplitude sigma
        n_dup:      number of times each parameter set is simulated
        window_indi: determines the sliding window size
                    'low':  window size is 43
                    'high': window size is 125
    Returns:
        total_cost: summation of FC correlation cost and FCD KS statistics cost
        fc_cost:    FC correlation cost
        fcd_cost:   FCD KS statistics cost
    '''
    # Loading test data
parameter = np.tile(parameter, [1, n_dup])
parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()
if window_indi == 'low':
emp_fcd = sio.loadmat('../input/fcd_test_low_window.mat')
window = 43
elif window_indi == 'high':
emp_fcd = sio.loadmat('../input/fcd_test_high_window.mat')
window = 125
else:
raise ValueError('Input is not acceptable.')
emp_fcd = np.array(emp_fcd['test_aveM'])
sc_mat_raw = csv_matrix_read('../input/sc_test.csv')
sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2
sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()
emp_fc = csv_matrix_read('../input/fc_test.csv')
emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()
    # Calculating simulated BOLD signal using MFM
bold_d = CBIG_mfm_single_simulation(parameter, sc_mat, 14.4)
# Calculating FC correlation cost
fc_cost = CBIG_FCcorrelation_single_simulation(emp_fc, bold_d, n_dup)
# Calculating FCD KS statistics cost
fcd_cost = CBIG_FCDKSstat_single_simulation(
emp_fcd, bold_d, n_dup, window_size=window)
# Calculating total cost
total_cost = fc_cost + fcd_cost
return total_cost, fc_cost, fcd_cost
def csv_matrix_read(filename):
'''
This function is used to read csv file into a numpy array
Args:
filename: input csv file
Returns:
out_array: output numpy array
'''
csv_file = open(filename, "r")
read_handle = csv.reader(csv_file)
out_list = []
R = 0
for row in read_handle:
out_list.append([])
for col in row:
out_list[R].append(float(col))
R = R + 1
out_array = np.array(out_list)
csv_file.close()
return out_array
| mit | 273,375,267,091,063,580 | 35.601375 | 79 | 0.577943 | false |
lionelz/networking-bambuk | networking_bambuk/ml2/bambuk_l2pop.py | 1 | 1674 |
from neutron.agent import rpc as agent_rpc
from neutron.common import topics
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from oslo_log import log
LOG = log.getLogger(__name__)
class BambukL2Pop(l2population_rpc.L2populationRpcCallBackTunnelMixin):
def __init__(self):
self.connection = agent_rpc.create_consumers(
[self],
topics.AGENT,
[topics.L2POPULATION, topics.UPDATE],
start_listening=False
)
self.connection.consume_in_threads()
def fdb_add(self, context, fdb_entries):
LOG.debug('fdb_add %s' % fdb_entries)
def fdb_remove(self, context, fdb_entries):
LOG.debug('fdb_remove %s' % fdb_entries)
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
LOG.debug('add_fdb_flow %s, %s, %s, %s, %s' % (
br, port_info, remote_ip, lvm, str(ofport)))
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
LOG.debug('del_fdb_flow %s, %s, %s, %s, %s' % (
br, port_info, remote_ip, lvm, str(ofport)))
def setup_tunnel_port(self, br, remote_ip, network_type):
LOG.debug('setup_tunnel_port %s, %s, %s' % (
br, remote_ip, network_type))
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
LOG.debug('cleanup_tunnel_port %s, %s, %s' % (
br, tun_ofport, tunnel_type))
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
LOG.debug('setup_entry_for_arp_reply %s, %s, %s, %s, %s' % (
br, action, local_vid, mac_address, ip_address))
| apache-2.0 | 1,707,994,943,119,956,200 | 34.617021 | 75 | 0.597372 | false |
jtackaberry/stagehand | external/metadata/image/IPTC.py | 1 | 5744 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# IPTC.py
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer
#
# First Edition: Thomas Schueppel <[email protected]>
# Maintainer: Dirk Meyer <[email protected]>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
# http://www.ap.org/apserver/userguide/codes.htm
# python imports
from struct import unpack
from .. import utils
mapping = {
'by-line title': 'title',
'headline': 'title',
'keywords': 'keywords',
'writer-editor': 'author',
'credit': 'author',
'by-line': 'author',
'country/primary location name': 'country',
'caption-abstract': 'description',
'city': 'city',
'sub-location': 'location'
}
# These names match the codes defined in ITPC's IIM record 2.
# copied from iptcinfo by Josh Carter, [email protected]
c_datasets = {
# 0: 'record version', # skip -- binary data
5: 'object name',
7: 'edit status',
8: 'editorial update',
10: 'urgency',
12: 'subject reference',
15: 'category',
20: 'supplemental category',
22: 'fixture identifier',
25: 'keywords',
26: 'content location code',
27: 'content location name',
30: 'release date',
35: 'release time',
37: 'expiration date',
38: 'expiration time',
40: 'special instructions',
42: 'action advised',
45: 'reference service',
47: 'reference date',
50: 'reference number',
55: 'date created',
60: 'time created',
62: 'digital creation date',
63: 'digital creation time',
65: 'originating program',
70: 'program version',
75: 'object cycle',
80: 'by-line',
85: 'by-line title',
90: 'city',
92: 'sub-location',
95: 'province/state',
100: 'country/primary location code',
101: 'country/primary location name',
103: 'original transmission reference',
105: 'headline',
110: 'credit',
115: 'source',
116: 'copyright notice',
118: 'contact',
120: 'caption-abstract',
122: 'writer-editor',
# 125: 'rasterized caption', # unsupported (binary data)
130: 'image type',
131: 'image orientation',
135: 'language identifier',
200: 'custom1', # These are NOT STANDARD, but are used by
201: 'custom2', # Fotostation. Use at your own risk. They're
202: 'custom3', # here in case you need to store some special
203: 'custom4', # stuff, but note that other programs won't
204: 'custom5', # recognize them and may blow them away if
205: 'custom6', # you open and re-save the file. (Except with
206: 'custom7', # Fotostation, of course.)
207: 'custom8',
208: 'custom9',
209: 'custom10',
210: 'custom11',
211: 'custom12',
212: 'custom13',
213: 'custom14',
214: 'custom15',
215: 'custom16',
216: 'custom17',
217: 'custom18',
218: 'custom19',
219: 'custom20',
}
def flatten(data):
    try:
        # Iterate over a snapshot of the items since empty entries are deleted
        for i, val in tuple(data.items()):
            if len(val) == 0:
                del data[i]
            elif i == 'keywords':
                data[i] = [ x.strip(' \t\0\n\r') for x in val ]
            else:
                data[i] = ' '.join(val).strip()
        return data
    except (ValueError, AttributeError, IndexError, KeyError):
        return []
def parseiptc(app):
iptc = {}
if app[:14] == "Photoshop 3.0\x00":
app = app[14:]
# parse the image resource block
offset = 0
data = None
while app[offset:offset+4] == "8BIM":
offset = offset + 4
# resource code
code = unpack("<H", app[offset:offset+2])[0]
offset = offset + 2
# resource name (usually empty)
name_len = ord(app[offset])
name = app[offset+1:offset+1+name_len]
offset = 1 + offset + name_len
if offset & 1:
offset = offset + 1
# resource data block
size = unpack("<L", app[offset:offset+4])[0]
offset = offset + 4
if code == 0x0404:
# 0x0404 contains IPTC/NAA data
data = app[offset:offset+size]
break
offset = offset + size
if offset & 1:
offset = offset + 1
if not data:
return None
offset = 0
iptc = {}
while 1:
try:
intro = ord(data[offset])
except (ValueError, KeyError, IndexError):
return flatten(iptc)
if intro != 0x1c:
return flatten(iptc)
(tag, record, dataset, length) = unpack("!BBBH", data[offset:offset+5])
val = utils.tostr(data[offset+5:offset+length+5])
offset += length + 5
name = c_datasets.get(dataset)
if not name:
continue
if name in iptc:
iptc[name].append(val)
else:
iptc[name] = [val]
return flatten(iptc)
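# Hypothetical usage, assuming a raw Photoshop APP13 payload extracted from a
# JPEG (the filename and tag values are illustrative):
#   app13 = open('photo_app13.bin', 'rb').read()
#   tags = parseiptc(app13)
#   tags.get('keywords')   # e.g. ['beach', 'sunset']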
| mit | -2,915,659,367,457,643,500 | 29.073298 | 79 | 0.576253 | false |
hfp/tensorflow-xsmm | tensorflow/python/distribute/__init__.py | 5 | 1086 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution Strategy library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
# pylint: enable=unused-import
| apache-2.0 | 4,622,346,670,216,164,000 | 42.44 | 80 | 0.726519 | false |
celstark/bidskit | dcm2ndar.py | 1 | 22745 | #!/usr/bin/env python3
"""
Convert flat DICOM file set into an NDAR-compliant fileset
Usage
----
dcm2ndar.py -i <DICOM Directory> -o <NDAR Directory>
Example
----
% dcm2ndar.py -i sub-001 -o sub-001.ndar
Authors
----
Mike Tyszka, Caltech Brain Imaging Center
Dates
----
2016-08-09 JMT Adapt from dcm2bids.py
MIT License
Copyright (c) 2016 Mike Tyszka
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = '0.1.0'
import os
import sys
import argparse
import subprocess
import pydicom
import json
import glob
import shutil
import nibabel as nib
from datetime import datetime
from dateutil import relativedelta
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='Convert DICOM files to NDAR-compliant fileset')
parser.add_argument('-i', '--indir', required=True, help='Source directory containing subject DICOM directories')
parser.add_argument('-o', '--outdir', required=False, help='Output directory for subject NDAR directories')
# Parse command line arguments
args = parser.parse_args()
dcm_root_dir = args.indir
if args.outdir:
ndar_root_dir = args.outdir
else:
ndar_root_dir = args.indir + '.ndar'
# Load protocol translation and exclusion info from DICOM directory
# If no translator is present, prot_dict is an empty dictionary
# and a template will be created in the DICOM directory. This template should be
# completed by the user and the conversion rerun.
prot_dict_json = os.path.join(dcm_root_dir, 'Protocol_Translator.json')
prot_dict = ndar_load_prot_dict(prot_dict_json)
# Set flag to write template protocol translator to DICOM directory
create_prot_dict = True
if prot_dict:
create_prot_dict = False
# Safe create output NDAR root directory
if os.path.isdir(ndar_root_dir):
shutil.rmtree(ndar_root_dir)
os.makedirs(ndar_root_dir)
# Loop over each subject's DICOM directory within the root source directory
for SID in os.listdir(dcm_root_dir):
dcm_sub_dir = os.path.join(dcm_root_dir, SID)
# Only process subdirectories
if os.path.isdir(dcm_sub_dir):
print('Processing subject ' + SID)
# Create subject directory
print(' Creating NDAR subject directory')
ndar_sub_dir = os.path.join(ndar_root_dir, SID)
subprocess.call(['mkdir', '-p', ndar_sub_dir])
# Create NDAR summary CSV for this subject
ndar_csv_fname = os.path.join(ndar_sub_dir, SID + '_NDAR.csv')
ndar_csv_fd = ndar_init_summary(ndar_csv_fname)
# Read additional subject-level DICOM header fields from first DICOM image
dcm_info = ndar_dcm_info(dcm_sub_dir)
# Run dcm2niix conversion from DICOM to Nifti with BIDS sidecars for metadata
# This relies on the current CBIC branch of dcm2niix which extracts additional DICOM fields
# required by NDAR
subprocess.call(['dcm2niix', '-b', 'y', '-f', 'sub-%n_%p', '-o', ndar_sub_dir, dcm_sub_dir])
# Loop over all Nifti files (*.nii, *.nii.gz) for this SID
# glob returns the full relative path from the NDAR root dir
for nii_fname_full in glob.glob(os.path.join(ndar_sub_dir, '*.nii*')):
# Read Nifti header for image FOV, extent (ie matrix) and voxel dimensions
print(' Reading Nifti header')
nii_info = ndar_nifti_info(nii_fname_full)
# Isolate base filename
nii_fname = os.path.basename(nii_fname_full)
# Parse file basename
SID, prot, fstub = ndar_parse_filename(nii_fname)
# Full path for file stub
fstub_full = os.path.join(ndar_sub_dir, fstub)
# Check if we're creating new protocol dictionary
if create_prot_dict:
print(' Adding protocol %s to dictionary' % prot)
# Add current protocol to protocol dictionary
# The value defaults to "EXCLUDE" which should be replaced with the correct NDAR
# ImageDescription for this protocol (eg "T1w Structural", "BOLD MB EPI Resting State")
prot_dict[prot] = "EXCLUDE"
else:
# JSON sidecar for this image
json_fname = fstub_full + '.json'
if not os.path.isfile(json_fname):
print('* JSON sidecar not found')
break
# Skip excluded protocols
if prot_dict[prot] == 'EXCLUDE':
print('* Excluding protocol ' + prot)
# Remove all files related to this protocol
for f in glob.glob(fstub_full + '.*'):
os.remove(f)
else:
print(' Converting protocol ' + prot)
# Read JSON sidecar contents
json_fd = open(json_fname, 'r')
info = json.load(json_fd)
json_fd.close()
# Combine JSON, Nifti and DICOM info dictionaries
info.update(nii_info)
info.update(dcm_info)
# Add remaining fields not in JSON or DICOM metadata
info['SID'] = SID
info['ImageFile'] = os.path.basename(nii_fname)
info['ImageDescription'] = prot_dict[prot]
info['ScanType'] = ndar_scantype(prot_dict[prot])
info['Orientation'] = ndar_orientation(info)
# Add row to NDAR summary CSV file
ndar_add_row(ndar_csv_fd, info)
# Delete JSON file
os.remove(json_fname)
# Close NDAR summary file for this subject
ndar_close_summary(ndar_csv_fd)
# Create combined protocol translator in DICOM root directory if necessary
if create_prot_dict:
ndar_create_prot_dict(prot_dict_json, prot_dict)
# Clean exit
sys.exit(0)
def ndar_load_prot_dict(prot_dict_json):
'''
Read protocol translations from JSON file
:param prot_dict_json:
:return:
'''
if os.path.isfile(prot_dict_json):
# Read JSON protocol translator
json_fd = open(prot_dict_json, 'r')
prot_trans = json.load(json_fd)
json_fd.close()
else:
print('* Protocol translator missing')
print('* Creating template translator in %s' % prot_dict_json)
# Initialize empty dictionary to be filled during subsequent file loop
prot_trans = dict()
return prot_trans
def ndar_create_prot_dict(prot_dict_json, prot_dict):
'''
Write protocol translation dictionary template to JSON file
:param prot_dict_json:
:param prot_dict:
:return:
'''
json_fd = open(prot_dict_json, 'w')
json.dump(prot_dict, json_fd, indent=4, separators=(',', ':'))
json_fd.close()
print('')
print('---')
print('New protocol dictionary created : %s' % prot_dict_json)
print('Remember to replace "EXCLUDE" values in dictionary with an appropriate image description')
    print('For example "MP-RAGE T1w 3D structural" or "MB-EPI BOLD resting-state"')
print('---')
print('')
return
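# A filled-in Protocol_Translator.json might look like this (protocol names
# are illustrative; the values replace the default "EXCLUDE"):
#   {
#       "MPRAGE_T1": "T1w Structural",
#       "MB_EPI_REST": "BOLD MB EPI Resting State",
#       "Localizer": "EXCLUDE"
#   }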
def ndar_parse_filename(fname):
"""
Extract SID and protocol string from filename in the form sub-<SID>_<Protocol String>.[nii or nii.gz]
:param fname:
:return: SID, prot, fstub
"""
# Init return values
SID, prot, fstub = 'None', 'None', 'None'
# Strip .nii or .nii.gz from fname
fstub = fname.replace('.nii.gz','').replace('.nii','')
# Split stub at first underscore
for chunk in fstub.split('_', 1):
if chunk.startswith('sub-'):
# SID is everything after "sub-" in this chunk
_, SID = chunk.split('sub-', 1)
else:
prot = chunk
return SID, prot, fstub
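# Example (illustrative filename, not from a real dataset):
#   ndar_parse_filename('sub-1234_T1w_MPR.nii.gz')
#   -> ('1234', 'T1w_MPR', 'sub-1234_T1w_MPR')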
def ndar_scantype(desc):
"""
Best effort guess at scan type from description
NDAR allowed MRI scan_type values
----
fMRI
MR structural (T1)
MR structural (T2)
MR structural (PD)
MR structural (FSPGR);
MR structural (MPRAGE)
MR structural (PD, T2)
MR structural (B0 map)
MR structural (B1 map);
Field Map
MR diffusion
single-shell DTI
multi-shell DTI
ASL
:param desc:
:return scan_type:
"""
# Convert description to upper case
desc = desc.upper()
# Search for common contrasts
if 'T1' in desc:
scan_type = 'MR structural (T1)'
elif 'T2' in desc:
scan_type = 'MR structural (T2)'
elif 'FIELDMAP' in desc or 'FMAP' in desc or 'FIELD MAP' in desc or 'B0' in desc:
scan_type = 'MR structural (B0 map)'
elif 'BOLD' in desc:
scan_type = 'fMRI'
else:
scan_type = 'MR structural (T1)' # T1 structural fallback value
return scan_type
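# Examples of the mapping above (description -> scan_type):
#   'T1w Structural'            -> 'MR structural (T1)'
#   'BOLD MB EPI Resting State' -> 'fMRI'
#   'B0 fieldmap phase'         -> 'MR structural (B0 map)'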
def ndar_orientation(info):
orientation = 'Axial'
if 'spc3d' in info['PulseSequenceDetails']:
orientation = 'Sagittal'
if 'tfl3d' in info['PulseSequenceDetails']:
orientation = 'Sagittal'
return orientation
def ndar_nifti_info(nii_fname):
'''
Extract Nifti header fields not handled by dcm2niix
:param nii_fname: Nifti image filename
:return: nii_info: Nifti information dictionary
'''
# Init a new dictionary
nii_info = dict()
# Load Nifti header
nii = nib.load(nii_fname)
hdr = nii.header
dim = hdr['dim']
res = hdr['pixdim']
# Fill dictionary
nii_info['AcquisitionMatrix'] = '%dx%d' % (dim[1], dim[2])
nii_info['NDims'] = dim[0]
nii_info['ImageExtent1'] = dim[1]
nii_info['ImageExtent2'] = dim[2]
nii_info['ImageExtent3'] = dim[3]
nii_info['ImageExtent4'] = dim[4]
nii_info['ImageExtent5'] = dim[5]
nii_info['ImageResolution1'] = res[1]
nii_info['ImageResolution2'] = res[2]
nii_info['ImageResolution3'] = res[3]
nii_info['ImageResolution4'] = res[4]
nii_info['ImageResolution5'] = res[5]
    # Use z dimension voxel spacing (pixdim) as slice thickness
    nii_info['SliceThickness'] = res[3]
if dim[0] > 3:
nii_info['Extent4Type'] = 'Timeseries'
else:
nii_info['Extent4Type'] = 'None'
return nii_info
def ndar_dcm_info(dcm_dir):
"""
Extract additional subject-level DICOM header fields not handled by dcm2niix
from first DICOM image in directory
:param dcm_dir: DICOM directory containing subject files
:return: dcm_info: extra information dictionary
"""
# Loop over files until first valid DICOM is found
ds = []
for dcm in os.listdir(dcm_dir):
        try:
            ds = pydicom.read_file(os.path.join(dcm_dir, dcm))
        except Exception:
            # Not a readable DICOM file - try the next one
            pass
# Break out if valid DICOM read
if ds:
break
# Init a new dictionary
dcm_info = dict()
# Read DoB and scan date
dob = ds.PatientBirthDate
scan_date = ds.AcquisitionDate
# Calculate age in months at time of scan using datetime functions
    d1 = datetime.strptime(dob, '%Y%m%d')
    d2 = datetime.strptime(scan_date, '%Y%m%d')
rd = relativedelta.relativedelta(d2, d1)
# Approximation since residual day to month conversion assumes 1 month = 30 days
age_months = rd.years * 12 + rd.months + round(rd.days / 30.0)
# Fill dictionary
dcm_info['Sex'] = ds.PatientSex
dcm_info['PatientPosition'] = ds.PatientPosition
dcm_info['TransmitCoil'] = ds.TransmitCoilName
dcm_info['SoftwareVersions'] = ds.SoftwareVersions
dcm_info['PhotometricInterpretation'] = ds.PhotometricInterpretation
dcm_info['AgeMonths'] = age_months
    dcm_info['ScanDate'] = datetime.strftime(d2, '%m/%d/%Y')  # NDAR scan date format MM/DD/YYYY
return dcm_info
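# Worked example of the age calculation above (hypothetical dates):
#   dob = '20100101', scan_date = '20120301'
#   relativedelta -> years=2, months=2, days=0
#   age_months = 2 * 12 + 2 + round(0 / 30.0) = 26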
def ndar_init_summary(fname):
'''
Open a summary CSV file and initialize with NDAR Image03 preamble
:param fname:
:return:
'''
# Write NDAR Image03 preamble and column headers
ndar_fd = open(fname, 'w')
ndar_fd.write('"image","03"\n')
ndar_fd.write('"subjectkey","src_subject_id","interview_date","interview_age","gender","comments_misc",')
ndar_fd.write('"image_file","image_thumbnail_file","image_description","experiment_id","scan_type","scan_object",')
ndar_fd.write('"image_file_format","data_file2","data_file2_type","image_modality","scanner_manufacturer_pd",')
ndar_fd.write('"scanner_type_pd","scanner_software_versions_pd","magnetic_field_strength",')
ndar_fd.write('"mri_repetition_time_pd","mri_echo_time_pd","flip_angle","acquisition_matrix",')
ndar_fd.write('"mri_field_of_view_pd","patient_position","photomet_interpret",')
ndar_fd.write('"receive_coil","transmit_coil","transformation_performed","transformation_type","image_history",')
ndar_fd.write('"image_num_dimensions","image_extent1","image_extent2","image_extent3",')
ndar_fd.write('"image_extent4","extent4_type","image_extent5","extent5_type",')
ndar_fd.write('"image_unit1","image_unit2","image_unit3","image_unit4","image_unit5",')
ndar_fd.write('"image_resolution1","image_resolution2","image_resolution3","image_resolution4",')
ndar_fd.write('"image_resolution5","image_slice_thickness","image_orientation",')
ndar_fd.write('"qc_outcome","qc_description","qc_fail_quest_reason","decay_correction","frame_end_times",')
ndar_fd.write('"frame_end_unit","frame_start_times","frame_start_unit","pet_isotope","pet_tracer",')
ndar_fd.write('"time_diff_inject_to_image","time_diff_units","pulse_seq","slice_acquisition","software_preproc",')
ndar_fd.write('"study","week","experiment_description","visit","slice_timing",')
ndar_fd.write('"bvek_bval_files","bvecfile","bvalfile"')
# Final newline
ndar_fd.write('\n')
return ndar_fd
def ndar_close_summary(fd):
fd.close()
return
def ndar_add_row(fd, info):
"""
Write a single experiment row to the NDAR summary CSV file
:param fd:
:param info:
:return:
"""
# Field descriptions for NDAR Image03 MRI experiments
# ElementName, DataType, Size, Required, ElementDescription, ValueRange, Notes, Aliases
# subjectkey,GUID,,Required,The NDAR Global Unique Identifier (GUID) for research subject,NDAR*,,
fd.write('"TBD",')
# src_subject_id,String,20,Required,Subject ID how it's defined in lab/project,,,
fd.write('"%s",' % info.get('SID','Unknown'))
# interview_date,Date,,Required,Date on which the interview/genetic test/sampling/imaging was completed. MM/DD/YYYY,,Required field,ScanDate
fd.write('"%s",' % info.get('ScanDate','Unknown'))
# interview_age,Integer,,Required,Age in months at the time of the interview/test/sampling/imaging.,0 :: 1260,
# "Age is rounded to chronological month. If the research participant is 15-days-old at time of interview,
# the appropriate value would be 0 months. If the participant is 16-days-old, the value would be 1 month.",
    fd.write('%d,' % info.get('AgeMonths', -1))
# gender,String,20,Required,Sex of the subject,M;F,M = Male; F = Female,
fd.write('"%s",' % info.get('Sex','Unknown'))
# comments_misc
fd.write('"",')
# image_file,File,,Required,"Data file (image, behavioral, anatomical, etc)",,,file_source
fd.write('"%s",' % info.get('ImageFile','Unknown'))
# image_thumbnail_file
fd.write('"",')
# Image description and scan type overlap strongly (eg fMRI), so we'll use the translated description provided
# by the user in the protocol dictionary for both NDAR fields. The user description should provide information
# about both the sequence type used (eg MB-EPI or MP-RAGE) and the purpose of the scan (BOLD resting-state,
# T1w structural, B0 fieldmap phase).
# Note the 50 character limit for scan type.
# image_description,String,512,Required,"Image description, i.e. DTI, fMRI, Fast SPGR, phantom, EEG, dynamic PET",,,
fd.write('"%s",' % info.get('ImageDescription','Unknown'))
# experiment_id,Integer,,Conditional,ID for the Experiment/settings/run,,,
fd.write('"",')
# scan_type,String,50,Required,Type of Scan,
# "MR diffusion; fMRI; MR structural (MPRAGE); MR structural (T1); MR structural (PD); MR structural (FSPGR);
# MR structural (T2); PET; ASL; microscopy; MR structural (PD, T2); MR structural (B0 map); MR structural (B1 map);
# single-shell DTI; multi-shell DTI; Field Map; X-Ray",,
fd.write('"%s",' % info.get('ScanType'))
# scan_object,String,50,Required,"The Object of the Scan (e.g. Live, Post-mortem, or Phantom",Live; Post-mortem; Phantom,,
fd.write('"Live",')
# image_file_format,String,50,Required,Image file format,
# AFNI; ANALYZE; AVI; BIORAD; BMP; BRIK; BRUKER; CHESHIRE; COR; DICOM; DM3; FITS; GE GENESIS; GE SIGNA4X; GIF;
# HEAD; ICO; ICS; INTERFILE; JPEG; LSM; MAGNETOM VISION; MEDIVISION; MGH; MICRO CAT; MINC; MIPAV XML; MRC; NIFTI;
# NRRD; OSM; PCX; PIC; PICT; PNG; QT; RAW; SPM; STK; TIFF; TGA; TMG; XBM; XPM; PARREC; MINC HDF; LIFF; BFLOAT;
# SIEMENS TEXT; ZVI; JP2; MATLAB; VISTA; ecat6; ecat7;,,
fd.write('"NIFTI",')
# data_file2
fd.write('"",')
# data_file2_type
fd.write('"",')
# image_modality,String,20,Required,Image modality, MRI;
fd.write('"MRI",')
# scanner_manufacturer_pd,String,30,Conditional,Scanner Manufacturer,,,
fd.write('"%s",' % info.get('Manufacturer','Unknown'))
# scanner_type_pd,String,50,Conditional,Scanner Type,,,ScannerID
fd.write('"%s",' % info.get('ManufacturersModelName','Unknown'))
# scanner_software_versions_pd
fd.write('"%s",' % info.get('SoftwareVersions','Unknown'))
# magnetic_field_strength,String,50,Conditional,Magnetic field strength,,,
    fd.write('%f,' % info.get('MagneticFieldStrength', -1.0))
# mri_repetition_time_pd,Float,,Conditional,Repetition Time (seconds),,,
fd.write('%0.4f,' % info.get('RepetitionTime',-1.0))
# mri_echo_time_pd,Float,,Conditional,Echo Time (seconds),,,
fd.write('%0.4f,' % info.get('EchoTime',-1.0))
# flip_angle,String,30,Conditional,Flip angle,,,
fd.write('%0.1f,' % info.get('FlipAngle',-1.0))
# MRI conditional fields
fd.write('"%s",' % info.get('AcquisitionMatrix')) # acquisition_matrix
fd.write('"%s",' % info.get('FOV')) # mri_field_of_view_pd
fd.write('"%s",' % info.get('PatientPosition')) # patient_position
fd.write('"%s",' % info.get('PhotometricInterpretation')) # photomet_interpret
fd.write('"",') # receive_coil
fd.write('"%s",' % info.get('TransmitCoil')) # transmit_coil
fd.write('"No",') # transformation_performed
fd.write('"",') # transformation_type
fd.write('"",') # image_history
fd.write('%d,' % info.get('NDims')) # image_num_dimensions
fd.write('%d,' % info.get('ImageExtent1')) # image_extent1
fd.write('%d,' % info.get('ImageExtent2')) # image_extent2
fd.write('%d,' % info.get('ImageExtent3')) # image_extent3
fd.write('%d,' % info.get('ImageExtent4')) # image_extent4
fd.write('"%s",' % info.get('Extent4Type')) # extent4_type
fd.write('"",') # image_extent5
fd.write('"",') # extent5_type
fd.write('"Millimeters",') # image_unit1
fd.write('"Millimeters",') # image_unit2
fd.write('"Millimeters",') # image_unit3
fd.write('"Seconds",') # image_unit4
fd.write('"",') # image_unit5
fd.write('%0.3f,' % info.get('ImageResolution1')) # image_resolution1
fd.write('%0.3f,' % info.get('ImageResolution2')) # image_resolution2
fd.write('%0.3f,' % info.get('ImageResolution3')) # image_resolution3
fd.write('%0.3f,' % info.get('ImageResolution4')) # image_resolution4
fd.write('%0.3f,' % info.get('ImageResolution5')) # image_resolution5
fd.write('%0.3f,' % info.get('SliceThickness')) # image_slice_thickness
fd.write('"%s",' % info.get('Orientation')) # image_orientation
fd.write('"",') # qc_outcome
fd.write('"",') # qc_description
fd.write('"",') # qc_fail_quest_reason
fd.write('"",') # decay_correction
fd.write('"",') # frame_end_times
fd.write('"",') # frame_end_unit
fd.write('"",') # frame_start_times
fd.write('"",') # frame_start_unit
fd.write('"",') # pet_isotope
fd.write('"",') # pet_tracer
fd.write('"",') # time_diff_inject_to_image
fd.write('"",') # time_diff_units
fd.write('"",') # pulse_seq
fd.write('"",') # slice_acquisition
fd.write('"None",') # software_preproc
fd.write('"",') # study
fd.write('"",') # week
fd.write('"",') # experiment_description
fd.write('"",') # visit
fd.write('"%s",' % str(info.get('SliceTiming'))) # slice_timing
fd.write('"",') # bvek_bval_files
fd.write('"",') # bvecfile
fd.write('"",') # bvalfile
# Final newline
fd.write('\n')
return
def strip_extensions(fname):
fstub, fext = os.path.splitext(fname)
if fext == '.gz':
fstub, fext = os.path.splitext(fstub)
return fstub
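# Example: strip_extensions('scan.nii.gz') -> 'scan'
#          strip_extensions('scan.nii')    -> 'scan'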
def ndar_include_prot(prot, prot_excludes):
'''
Returns False if protocol is in exclude list
:param prot:
:param prot_excludes:
:return:
'''
status = True
for pe in prot_excludes:
if pe in prot:
status = False
return status
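# Example (case-sensitive substring match):
#   ndar_include_prot('T1w_MPR', ['Scout', 'localizer'])      -> True
#   ndar_include_prot('AAHead_Scout', ['Scout', 'localizer']) -> False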
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| mit | -5,248,550,081,232,596,000 | 34.65047 | 144 | 0.625588 | false |
diogommartins/ryu | ryu/lib/packet/ipv4.py | 28 | 5481 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from . import packet_base
from . import packet_utils
from . import icmp
from . import igmp
from . import udp
from . import tcp
from . import sctp
from . import ospf
from . import in_proto as inet
from ryu.lib import addrconv
IPV4_ADDRESS_PACK_STR = '!I'
IPV4_ADDRESS_LEN = struct.calcsize(IPV4_ADDRESS_PACK_STR)
IPV4_PSEUDO_HEADER_PACK_STR = '!4s4s2xHH'
class ipv4(packet_base.PacketBase):
"""IPv4 (RFC 791) header encoder/decoder class.
NOTE: When decoding, this implementation tries to decode the upper
layer protocol even for a fragmented datagram. It isn't likely
what a user would want.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
IPv4 addresses are represented as a string like '192.0.2.1'.
__init__ takes the corresponding args in this order.
============== ======================================== ==================
Attribute Description Example
============== ======================================== ==================
version Version
header_length IHL
tos Type of Service
total_length Total Length
(0 means automatically-calculate
when encoding)
identification Identification
flags Flags
offset Fragment Offset
ttl Time to Live
proto Protocol
csum Header Checksum
(Ignored and automatically-calculated
when encoding)
src Source Address '192.0.2.1'
dst Destination Address '192.0.2.2'
option A bytearray which contains the entire
Options, or None for no Options
============== ======================================== ==================
"""
_PACK_STR = '!BBHHHBBH4s4s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': [
'src', 'dst'
]
}
def __init__(self, version=4, header_length=5, tos=0,
total_length=0, identification=0, flags=0,
offset=0, ttl=255, proto=0, csum=0,
src='10.0.0.1',
dst='10.0.0.2',
option=None):
super(ipv4, self).__init__()
self.version = version
self.header_length = header_length
self.tos = tos
self.total_length = total_length
self.identification = identification
self.flags = flags
self.offset = offset
self.ttl = ttl
self.proto = proto
self.csum = csum
self.src = src
self.dst = dst
self.option = option
def __len__(self):
return self.header_length * 4
@classmethod
def parser(cls, buf):
(version, tos, total_length, identification, flags, ttl, proto, csum,
src, dst) = struct.unpack_from(cls._PACK_STR, buf)
header_length = version & 0xf
version = version >> 4
offset = flags & ((1 << 13) - 1)
flags = flags >> 13
length = header_length * 4
if length > ipv4._MIN_LEN:
option = buf[ipv4._MIN_LEN:length]
else:
option = None
msg = cls(version, header_length, tos, total_length, identification,
flags, offset, ttl, proto, csum,
addrconv.ipv4.bin_to_text(src),
addrconv.ipv4.bin_to_text(dst), option)
return msg, ipv4.get_packet_type(proto), buf[length:total_length]
def serialize(self, payload, prev):
length = len(self)
hdr = bytearray(length)
version = self.version << 4 | self.header_length
flags = self.flags << 13 | self.offset
if self.total_length == 0:
self.total_length = self.header_length * 4 + len(payload)
struct.pack_into(ipv4._PACK_STR, hdr, 0, version, self.tos,
self.total_length, self.identification, flags,
self.ttl, self.proto, 0,
addrconv.ipv4.text_to_bin(self.src),
addrconv.ipv4.text_to_bin(self.dst))
if self.option:
assert (length - ipv4._MIN_LEN) >= len(self.option)
hdr[ipv4._MIN_LEN:ipv4._MIN_LEN + len(self.option)] = self.option
self.csum = packet_utils.checksum(hdr)
struct.pack_into('!H', hdr, 10, self.csum)
return hdr
ipv4.register_packet_type(icmp.icmp, inet.IPPROTO_ICMP)
ipv4.register_packet_type(igmp.igmp, inet.IPPROTO_IGMP)
ipv4.register_packet_type(tcp.tcp, inet.IPPROTO_TCP)
ipv4.register_packet_type(udp.udp, inet.IPPROTO_UDP)
ipv4.register_packet_type(sctp.sctp, inet.IPPROTO_SCTP)
ipv4.register_packet_type(ospf.ospf, inet.IPPROTO_OSPF)
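# Usage sketch (hypothetical addresses; not part of the ryu test suite):
#   pkt = ipv4(proto=inet.IPPROTO_UDP, src='192.0.2.1', dst='192.0.2.2')
#   hdr = pkt.serialize(payload=b'', prev=None)  # header checksum filled in
#   msg, payload_cls, rest = ipv4.parser(bytes(hdr))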
| apache-2.0 | 3,032,456,156,906,829,000 | 35.785235 | 78 | 0.574713 | false |
mpacula/AutoCorpus | src/tests/driver.py | 1 | 2990 | #!/usr/bin/env python
"""
AutoCorpus: automatically extracts clean natural language corpora from
publicly available datasets.
driver.py: tests Autocorpus tools based on a test text file.
Copyright (C) 2011 Maciej Pacula
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import subprocess
from optparse import OptionParser
# poor man's check_output for python < 2.7
def execute(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output, err = process.communicate()
ret = process.poll()
if ret != 0:
raise subprocess.CalledProcessError(ret, cmd, output=output)
return output
def stripLines(text):
lines = text.split('\n')
output = ""
for line in lines:
output += line.strip() + '\n'
return output
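# Example: stripLines("  a \n b ") -> "a\nb\n"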
def performTest(command, expectedOutput):
print "> " + command
output = execute(command)
output = stripLines(output.strip())
expectedOutput = stripLines(expectedOutput.strip())
print output + "\n"
if output != expectedOutput:
print "ERROR. Expected:"
print expectedOutput + "\n"
return False
else:
return True
if __name__ == "__main__":
parser = OptionParser(usage="%s file" % sys.argv[0])
(options, args) = parser.parse_args()
if len(args) != 1:
print "usage: " + parser.usage
exit(1)
f = open(args[0], 'r')
command = None
expectedOutput = ""
totalTests = 0
failedTests = 0
for line in f:
if line.strip().startswith("#"):
continue
elif line.startswith("--"):
success = performTest(command, expectedOutput)
totalTests += 1
failedTests += 0 if success else 1
command = None
expectedOutput = ""
elif command != None:
expectedOutput += line
elif len(line.strip()) > 0:
command = line.strip()
if command != None:
success = performTest(command, expectedOutput)
totalTests += 1
failedTests += 0 if success else 1
f.close()
print "----------------------------"
print "%-20s %d\n%-20s %d\n%-20s %d" % ("TESTS RUN:", totalTests,
"TESTS PASSED: ", totalTests - failedTests,
"TESTS FAILED:", failedTests)
exit (0 if failedTests == 0 else 1)
| agpl-3.0 | -833,297,781,155,915,000 | 28.60396 | 76 | 0.622742 | false |
nthmost/rheti-python | rheti/question.py | 1 | 1051 | import random
class InvalidChoice(Exception):
pass
class Choice(object):
""" Describes a question option (choice), its reference code (e.g. Q21a), and the value that
selecting this Choice represents to the test.
"""
def __init__(self, text, code, value):
self.text = text
self.value = value
self.code = code
def select(self):
""" Returns the value of this Choice. """
        return self.value
class Question(object):
""" Describes a question and its possible Choices. """
def __init__(self, text, choices):
self.text = text
self.choices = choices
def random_choice(self):
return random.choice(self.choices)
def choose(self, code):
""" Takes the 'code' matching to the desired Choice and returns the 'value' of that Choice. """
for choice in self.choices:
if choice.code == code:
return choice.select()
raise InvalidChoice('Code "%s" does not match a Choice for this Question.' % code)
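# Minimal usage sketch (hypothetical choices, not real RHETI items):
#   q = Question('Pick the statement that fits best',
#                [Choice('I plan ahead', 'Q1a', 1),
#                 Choice('I improvise', 'Q1b', 9)])
#   q.choose('Q1a')             # -> 1
#   q.random_choice().select()  # -> 1 or 9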
| mit | -739,113,703,456,876,900 | 25.275 | 103 | 0.613701 | false |
alexschiller/osf.io | scripts/approve_embargo_terminations.py | 6 | 2822 | """EmbargoTerminationApprovals are the Sanction subclass that allows users
to make Embargoes public before the official end date. Like RegistrationAprpovals
and Embargoes, if an admin fails to approve or reject this request within 48
hours it is approved automagically.
Run nightly, this script will approve any embargo termination
requests for which not all admins have responded within the 48 hour window.
Makes the Embargoed Node and its components public.
"""
import datetime
import logging
import sys
from django.utils import timezone
from django.db import transaction
from modularodm import Q
from framework.celery_tasks import app as celery_app
from website import models, settings
from website.app import init_app
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_pending_embargo_termination_requests():
auto_approve_time = timezone.now() - settings.EMBARGO_TERMINATION_PENDING_TIME
return models.EmbargoTerminationApproval.find(
Q('initiation_date', 'lt', auto_approve_time) &
Q('state', 'eq', models.EmbargoTerminationApproval.UNAPPROVED)
)
def main():
pending_embargo_termination_requests = get_pending_embargo_termination_requests()
count = 0
for request in pending_embargo_termination_requests:
registration = models.Node.find_one(Q('embargo_termination_approval', 'eq', request))
if not registration.is_embargoed:
logger.warning("Registration {0} associated with this embargo termination request ({0}) is not embargoed.".format(
registration._id,
request._id
))
continue
embargo = registration.embargo
if not embargo:
logger.warning("No Embargo associated with this embargo termination request ({0}) on Node: {1}".format(
request._id,
registration._id
))
continue
else:
count += 1
logger.info("Ending the Embargo ({0}) of Registration ({1}) early. Making the registration and all of its children public now.".format(embargo._id, registration._id))
request._on_complete()
registration.reload()
assert registration.is_embargoed is False
assert registration.is_public is True
logger.info("Auto-approved {0} of {1} embargo termination requests".format(count, len(pending_embargo_termination_requests)))
@celery_app.task(name='scripts.approve_embargo_terminations')
def run_main(dry_run=True):
if not dry_run:
scripts_utils.add_file_logger(logger, __file__)
init_app(routes=False)
with transaction.atomic():
main()
if dry_run:
raise RuntimeError("Dry run, rolling back transaction")
| apache-2.0 | 8,636,664,773,812,053,000 | 37.657534 | 178 | 0.694543 | false |
pombredanne/pants | contrib/jax_ws/src/python/pants/contrib/jax_ws/targets/jax_ws_library.py | 4 | 1290 | # coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
logger = logging.getLogger(__name__)
class JaxWsLibrary(ExportableJvmLibrary):
"""Generates a Java library from JAX-WS wsdl files."""
def __init__(self,
payload=None,
xjc_args=None,
extra_args=None,
**kwargs):
"""Generates a Java library from WSDL files using JAX-WS.
:param list xjc_args: Additional arguments to xjc.
:param list extra_args: Additional arguments for the CLI.
"""
payload = payload or Payload()
payload.add_fields({
'xjc_args': PrimitiveField(self.assert_list(xjc_args, key_arg='xjc_args')),
'extra_args': PrimitiveField(self.assert_list(extra_args, key_arg='extra_args')),
})
super(JaxWsLibrary, self).__init__(payload=payload, **kwargs)
self.add_labels('codegen')
| apache-2.0 | -3,646,351,395,191,158,000 | 33.864865 | 93 | 0.682171 | false |
denzow/ipymessenger | test_ipymessenger.py | 1 | 2490 | #!/usr/bin/env python
# coding:utf-8
from __future__ import print_function, unicode_literals
import sys
import time
import traceback
from logging import StreamHandler
from ipymessenger.IpmsgServer import IpmsgServer
if __name__ == "__main__":
dest_host = "192.168.26.189"
    # Pass a logging Handler if you need debug messages
#ip = IpmsgServer("sayamada", "ymsft_group", 2722, StreamHandler(), broad_cast_addrs=["172.16.25.0"])
ip = IpmsgServer("sayamada", "ymsft_group", 2722, StreamHandler(), broad_cast_addrs=["172.16.25.0"], request_info_interval=20)
#ip.set_sendmsg_handler(lambda x: x.message.rstrip("\00")+"ADD BY HANDLER")
#ip = IpmsgServer("sayamada", "ymsft_group", 2722)
try:
#ip.set_sendmsg_handler(lambda x:print(x))
ip.start()
time.sleep(60)
"""
hello_no = ip.send_message(dest_host, "⑫", is_secret=True)
time.sleep(3)
print("######hello send success:" + str(ip.check_sended_message(hello_no)))
print("######hello is_read?:" + str(ip.check_readed_message(hello_no)))
time.sleep(5)
print("######hello is_read?:" + str(ip.check_readed_message(hello_no)))
for x in ip.host_list_dict:
print(x, ip.host_list_dict[x].group)
test_no = ip.send_message_by_fuzzy_nickname("slope 太郎", "へろー by name", is_secret=True)
time.sleep(5)
print("######test_no is success:" + str(ip.check_sended_message(test_no)))
test_no = ip.send_message_by_osusername("Administrator", "へろー by name", is_secret=True)
time.sleep(5)
print("######test_no is success:" + str(ip.check_sended_message(test_no)))
print(ip.sended_que)
#test_no = ip.send_message_by_osusername("Administrator", "へろー by name")
#time.sleep(5)
#print("######test_no is success:" + str(ip.check_sended_message(test_no)))
#print(ip.get_message("192.168.26.189"))
#time.sleep(10)
#print(ip.get_message("192.168.26.189"))
print("#"*20)
for x in ip.host_list_dict:
print(1,x)
print(2,ip.host_list_dict[x].addr)
print(3,ip.host_list_dict[x].user_name)
"""
time.sleep(100)
except Exception as e:
print("Exception occured")
error_args = sys.exc_info()
print(traceback.print_tb(error_args[2]))
print(e)
finally:
ip.stop()
ip.join()
| mit | 1,984,475,330,241,876,200 | 34.15942 | 130 | 0.596455 | false |
eduNEXT/edx-platform | lms/djangoapps/survey/utils.py | 4 | 2181 | """
Utilities for determining whether or not a survey needs to be completed.
"""
from django.utils.translation import ugettext as _
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.access_response import AccessError
from lms.djangoapps.courseware.access_utils import ACCESS_GRANTED
from lms.djangoapps.survey.models import SurveyAnswer, SurveyForm
class SurveyRequiredAccessError(AccessError):
"""
Access denied because the user has not completed a required survey
"""
def __init__(self):
error_code = "survey_required"
developer_message = "User must complete a survey"
user_message = _("You must complete a survey")
super().__init__(error_code, developer_message, user_message)
def is_survey_required_for_course(course_descriptor):
"""
Returns whether a Survey is required for this course
"""
# Check to see that the survey is required in the CourseBlock.
if not getattr(course_descriptor, 'course_survey_required', False):
return SurveyRequiredAccessError()
# Check that the specified Survey for the course exists.
return SurveyForm.get(course_descriptor.course_survey_name, throw_if_not_found=False)
def check_survey_required_and_unanswered(user, course_descriptor):
"""
Checks whether a user is required to answer the survey and has yet to do so.
Returns:
AccessResponse: Either ACCESS_GRANTED or SurveyRequiredAccessError.
"""
if not is_survey_required_for_course(course_descriptor):
return ACCESS_GRANTED
# anonymous users do not need to answer the survey
if user.is_anonymous:
return ACCESS_GRANTED
# course staff do not need to answer survey
has_staff_access = has_access(user, 'staff', course_descriptor)
if has_staff_access:
return ACCESS_GRANTED
# survey is required and it exists, let's see if user has answered the survey
survey = SurveyForm.get(course_descriptor.course_survey_name)
answered_survey = SurveyAnswer.do_survey_answers_exist(survey, user)
if answered_survey:
return ACCESS_GRANTED
return SurveyRequiredAccessError()
| agpl-3.0 | 4,444,762,562,629,214,700 | 33.619048 | 89 | 0.724438 | false |
nictuku/nwu | nwu/common/aptmethod.py | 1 | 6400 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Stephan Peijnik ([email protected])
#
# This file is part of NWU.
#
# NWU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NWU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NWU. If not, see <http://www.gnu.org/licenses/>.
import sys
class AptMethod:
def __init__(self, version):
self.version = version
self.capabilities = {}
self.running = False
def set_capability(self, name, value):
if not self.running:
self.capabilities[name] = value
else:
raise Exception('Cannot set capability whilst method is running.')
def write(self, data):
return sys.stdout.write(data)
def writeline(self, line):
return self.write(line + '\n')
def flush(self):
try:
return sys.stdout.flush()
except KeyboardInterrupt:
sys.exit(0)
def readline(self):
try:
return sys.stdin.readline()
except KeyboardInterrupt:
sys.exit(0)
def send_reply(self, msg_code, msg_string, headers={}):
self.writeline('%d %s' % (msg_code, msg_string))
for h in headers:
self.writeline('%s: %s' % (h, headers[h]))
self.write('\n')
self.flush()
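    # Example: send_reply(100, 'Capabilities', {'Version': '1.0'}) writes:
    #   100 Capabilities
    #   Version: 1.0
    #   <blank line>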
def send_capabilities(self):
""" Send capabilities.
This initializes a session.
"""
headers = {'Version': self.version, 'Single-Instance': 'true'}
headers.update(self.capabilities)
self.send_reply(100, 'Capabilities', headers)
def send_log(self, message):
""" Send log message.
This causes apt, if debugging is enabled, to display the message.
"""
self.send_reply(101, 'Log', {'Message': message})
def send_status(self, message):
""" Send status message.
Used for displaying pre-transfer status messages.
"""
self.send_reply(102, 'Status', {'Message': message})
def send_uri_start(self, uri, size, lastmod, resumepoint):
headers = {
'URI': uri,
'Size': size,
'Last-Modified': lastmod,
'Resume-Point': resumepoint
}
self.send_reply(200, 'URI Start', headers)
def send_uri_done(self, uri, size, lastmod, filename, md5hash):
headers = {
'URI': uri,
'Size': size,
'Last-Modified': lastmod,
'Filename': filename,
'MD5-Hash': md5hash
}
self.send_reply(201, 'URI Done', headers)
def send_uri_failure(self, uri, message):
headers = {
'URI': uri,
'Message': message
}
self.send_reply(400, 'URI Failure', headers)
def send_failure(self, message):
self.send_reply(401, 'General Failure', {'Message': message})
def send_auth_required(self, site):
self.send_reply(402, 'Authorization Required', {'Site': site})
def send_media_failure(self, media, drive):
self.send_reply(403, 'Media Failure', {'Media': media, 'Drive': drive})
def handle_other(self, msg_code, msg_string, headers):
self.send_failure('Handler for %d (%s) not implemented.'
% (msg_code, msg_string))
sys.stderr.write('ERROR: unhandled message\n')
sys.stderr.write('msg_code : %s\n' % (msg_code))
sys.stderr.write('msg_string: %s\n' % (msg_string))
sys.stderr.write('headers : %s\n' % (headers))
sys.stderr.flush()
def run(self):
self.running = True
# First thing we should do is writing out our capabilities...
self.send_capabilities()
while not sys.stdin.closed:
have_full_request = False
message_code = None
message_string = None
message_headers = {}
while not have_full_request:
if sys.stdin.closed:
return
line = self.readline()
if line == '':
break
line = line.strip()
if not message_code and ' ' in line:
try:
message_code, message_string = line.split(' ', 1)
message_code = int(message_code)
except Exception, e:
self.send_failure('Internal error.')
elif line != '':
header_name = None
header_value = None
header_name, header_value = line.split(': ', 1)
message_headers[header_name] = header_value
elif line == '' and message_code:
have_full_request = True
else:
self.send_failure('Internal error.')
# we should have a full request at this point.
if have_full_request:
func = getattr(self, 'handle_%d' % (message_code), None)
if func:
func(message_string, message_headers)
else:
self.handle_other(message_code, message_string,
message_headers)
def convert_truefalse(self, truefalse):
if truefalse == 'true':
return True
return False
def handle_600(self, msg_string, headers):
"""
URI Acquire request. This method needs to be overridden!
This request should provide us with the following
headers:
Index-File Requested file is index file (true|false).
URI URI of file.
Filename Local filename to save file to.
"""
self.send_uri_failure(headers['URI'], 'handle_600 method not '
'implemented.')
if __name__ == '__main__':
AptMethod('1.0').run()
| gpl-3.0 | -3,385,302,614,599,720,000 | 31.820513 | 79 | 0.542031 | false |
doungni/selenium | py/selenium/webdriver/firefox/firefox_profile.py | 60 | 14669 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import base64
import copy
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from xml.dom import minidom
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
WEBDRIVER_EXT = "webdriver.xpi"
WEBDRIVER_PREFERENCES = "webdriver_prefs.json"
EXTENSION_NAME = "[email protected]"
class AddonFormatError(Exception):
"""Exception for not well-formed add-on manifest files"""
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = None
def __init__(self, profile_directory=None):
"""
Initialises a new instance of a Firefox Profile
:args:
- profile_directory: Directory of profile that you want to use.
This defaults to None and will create a new
directory when object is created.
"""
if not FirefoxProfile.DEFAULT_PREFERENCES:
with open(os.path.join(os.path.dirname(__file__),
WEBDRIVER_PREFERENCES)) as default_prefs:
FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs)
self.default_preferences = copy.deepcopy(
FirefoxProfile.DEFAULT_PREFERENCES['mutable'])
self.native_events_enabled = True
self.profile_dir = profile_directory
self.tempfolder = None
if self.profile_dir is None:
self.profile_dir = self._create_tempfolder()
else:
self.tempfolder = tempfile.mkdtemp()
newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy")
shutil.copytree(self.profile_dir, newprof,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
self.profile_dir = newprof
self._read_existing_userjs(os.path.join(self.profile_dir, "user.js"))
self.extensionsDir = os.path.join(self.profile_dir, "extensions")
self.userPrefs = os.path.join(self.profile_dir, "user.js")
#Public Methods
def set_preference(self, key, value):
"""
sets the preference that we want in the profile.
"""
self.default_preferences[key] = value
def add_extension(self, extension=WEBDRIVER_EXT):
self._install_extension(extension)
def update_preferences(self):
for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items():
self.default_preferences[key] = value
self._write_user_prefs(self.default_preferences)
#Properties
@property
def path(self):
"""
Gets the profile directory that is currently being used
"""
return self.profile_dir
@property
def port(self):
"""
Gets the port that WebDriver is working on
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
raise WebDriverException("Port needs to be an integer")
try:
port = int(port)
if port < 1 or port > 65535:
raise WebDriverException("Port number must be in the range 1..65535")
except (ValueError, TypeError) as e:
raise WebDriverException("Port needs to be an integer")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self.default_preferences["webdriver_accept_untrusted_certs"]
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self.default_preferences["webdriver_assume_untrusted_issuer"]
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self.default_preferences['webdriver_enable_native_events']
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
path_root = len(self.path) + 1 # account for trailing slash
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.b64encode(fp.getvalue()).decode('UTF-8')
def set_proxy(self, proxy):
import warnings
warnings.warn(
"This method has been deprecated. Please pass in the proxy object to the Driver Object",
DeprecationWarning)
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
self._set_manual_proxy_preference("socks", proxy.socks_proxy)
elif proxy.proxy_type is ProxyType.PAC:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
def _set_manual_proxy_preference(self, key, setting):
        if setting is None or setting == '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[0])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[1]))
def _create_tempfolder(self):
"""
Creates a temp folder to store User.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
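    # Example: set_preference("browser.download.dir", "/tmp") is written as
    #   user_pref("browser.download.dir", "/tmp");
    # json.dumps gives user.js-compatible literals (strings quoted,
    # booleans lowercased).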
def _read_existing_userjs(self, userjs):
import warnings
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs) as f:
for usr in f:
matches = re.search(PREF_RE, usr)
try:
self.default_preferences[matches.group(1)] = json.loads(matches.group(2))
except:
warnings.warn("(skipping) failed to json.loads existing preference: " +
matches.group(1) + matches.group(2))
except:
# The profile given hasn't had any changes made, i.e no users.js
pass
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
extensions_path = os.path.join(self.profile_dir, 'extensions')
addon_path = os.path.join(extensions_path, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir)
def _addon_details(self, addon_path):
"""
Returns a dictionary of details about the addon.
:param addon_path: path to the add-on directory or XPI
Returns::
{'id': u'[email protected]', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
'unpack': False } # whether to unpack the addon
"""
details = {
'id': None,
'unpack': False,
'name': None,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
# If the namespace is not the default one remove 'xlmns:'
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
if not os.path.exists(addon_path):
raise IOError('Add-on path does not exist: %s' % addon_path)
try:
if zipfile.is_zipfile(addon_path):
# Bug 944361 - We cannot use 'with' together with zipFile because
# it will cause an exception thrown in Python 2.6.
try:
compressed_file = zipfile.ZipFile(addon_path, 'r')
manifest = compressed_file.read('install.rdf')
finally:
compressed_file.close()
elif os.path.isdir(addon_path):
with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
manifest = f.read()
else:
raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
except (IOError, KeyError) as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
try:
doc = minidom.parseString(manifest)
# Get the namespaces abbreviations
em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
description = doc.getElementsByTagName(rdf + 'Description').item(0)
if description is None:
description = doc.getElementsByTagName('Description').item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({entry: get_text(node)})
if details.get('id') is None:
for i in range(description.attributes.length):
attribute = description.attributes.item(i)
if attribute.name == em + 'id':
details.update({'id': attribute.value})
except Exception as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
# turn unpack into a true/false value
if isinstance(details['unpack'], str):
details['unpack'] = details['unpack'].lower() == 'true'
# If no ID is set, the add-on is invalid
if details.get('id') is None:
raise AddonFormatError('Add-on id could not be found.')
return details
| apache-2.0 | -7,991,479,225,724,271,000 | 37.704485 | 100 | 0.591042 | false |
JonathanSalwan/Triton | src/examples/pin/runtime_register_modification.py | 1 | 1350 | #!/usr/bin/env python2
## -*- coding: utf-8 -*-
##
## Output:
##
## $ ./build/triton ./src/examples/pin/runtime_register_modification.py ./src/samples/crackmes/crackme_xor a
## 4005f9: mov dword ptr [rbp - 4], eax
## #180 = ((_ extract 31 24) (_ bv0 32)) ; byte reference - MOV operation
## #181 = ((_ extract 23 16) (_ bv0 32)) ; byte reference - MOV operation
## #182 = ((_ extract 15 8) (_ bv0 32)) ; byte reference - MOV operation
## #183 = ((_ extract 7 0) (_ bv0 32)) ; byte reference - MOV operation
## #184 = (concat ((_ extract 31 24) (_ bv0 32)) ((_ extract 23 16) (_ bv0 32)) ((_ extract 15 8) (_ bv0 32)) ((_ extract 7 0) (_ bv0 32))) ; concat reference - MOV operation
## #185 = (_ bv4195836 64) ; Program Counter
## Win
##
import sys
from pintool import *
from triton import ARCH
def cb1(inst):
if inst.getAddress() == 0x4005e2:
setCurrentRegisterValue(getTritonContext().registers.rax, 0)
def cb2(inst):
if inst.getAddress() == 0x4005e2:
print(inst)
for expr in inst.getSymbolicExpressions():
print('\t %s' %(expr))
if __name__ == '__main__':
setupImageWhitelist(['crackme'])
startAnalysisFromSymbol('main')
insertCall(cb1, INSERT_POINT.BEFORE_SYMPROC)
insertCall(cb2, INSERT_POINT.BEFORE)
runProgram()
| apache-2.0 | 3,029,729,380,985,088,000 | 34.526316 | 183 | 0.593333 | false |
ccpgames/eve-metrics | web2py/gluon/custom_import.py | 1 | 7867 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import __builtin__
import os
import re
import sys
import threading
import traceback
from gluon import current
NATIVE_IMPORTER = __builtin__.__import__
INVALID_MODULES = set(('', 'gluon', 'applications', 'custom_import'))
# backward compatibility API
def custom_import_install():
if __builtin__.__import__ == NATIVE_IMPORTER:
INVALID_MODULES.update(sys.modules.keys())
__builtin__.__import__ = custom_importer
def track_changes(track=True):
assert track in (True, False), "must be True or False"
current.request._custom_import_track_changes = track
def is_tracking_changes():
return current.request._custom_import_track_changes
class CustomImportException(ImportError):
pass
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
"""
The web2py custom importer. Like the standard Python importer but it
tries to transform import statements as something like
"import applications.app_name.modules.x".
If the import failed, fall back on naive_importer
"""
globals = globals or {}
locals = locals or {}
fromlist = fromlist or []
try:
if current.request._custom_import_track_changes:
base_importer = TRACK_IMPORTER
else:
base_importer = NATIVE_IMPORTER
except: # there is no current.request (should never happen)
base_importer = NATIVE_IMPORTER
# if not relative and not from applications:
if hasattr(current, 'request') \
and level <= 0 \
and not name.split('.')[0] in INVALID_MODULES \
and isinstance(globals, dict):
import_tb = None
try:
try:
oname = name if not name.startswith('.') else '.'+name
return NATIVE_IMPORTER(oname, globals, locals, fromlist, level)
except ImportError:
items = current.request.folder.split(os.path.sep)
if not items[-1]:
items = items[:-1]
modules_prefix = '.'.join(items[-2:]) + '.modules'
if not fromlist:
# import like "import x" or "import x.y"
result = None
for itemname in name.split("."):
new_mod = base_importer(
modules_prefix, globals, locals, [itemname], level)
try:
result = result or new_mod.__dict__[itemname]
except KeyError, e:
raise ImportError, 'Cannot import module %s' % str(e)
modules_prefix += "." + itemname
return result
else:
# import like "from x import a, b, ..."
pname = modules_prefix + "." + name
return base_importer(pname, globals, locals, fromlist, level)
except ImportError, e1:
import_tb = sys.exc_info()[2]
try:
return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
except ImportError, e3:
raise ImportError, e1, import_tb # there an import error in the module
except Exception, e2:
raise e2 # there is an error in the module
finally:
if import_tb:
import_tb = None
return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
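# Example of the transformation above: inside application "welcome",
# "import mymodule" that fails natively is retried as
# "applications.welcome.modules.mymodule".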
class TrackImporter(object):
"""
An importer tracking the date of the module files and reloading them when
they have changed.
"""
THREAD_LOCAL = threading.local()
PACKAGE_PATH_SUFFIX = os.path.sep + "__init__.py"
def __init__(self):
self._import_dates = {} # Import dates of the files of the modules
def __call__(self, name, globals=None, locals=None, fromlist=None, level=-1):
"""
The import method itself.
"""
globals = globals or {}
locals = locals or {}
fromlist = fromlist or []
if not hasattr(self.THREAD_LOCAL, '_modules_loaded'):
self.THREAD_LOCAL._modules_loaded = set()
try:
# Check the date and reload if needed:
self._update_dates(name, globals, locals, fromlist, level)
# Try to load the module and update the dates if it works:
result = NATIVE_IMPORTER(name, globals, locals, fromlist, level)
# Module maybe loaded for the 1st time so we need to set the date
self._update_dates(name, globals, locals, fromlist, level)
return result
except Exception, e:
raise # Don't hide something that went wrong
def _update_dates(self, name, globals, locals, fromlist, level):
"""
Update all the dates associated to the statement import. A single
import statement may import many modules.
"""
self._reload_check(name, globals, locals, level)
for fromlist_name in fromlist or []:
pname = "%s.%s" % (name, fromlist_name)
self._reload_check(pname, globals, locals, level)
def _reload_check(self, name, globals, locals, level):
"""
Update the date associated to the module and reload the module if
the file has changed.
"""
module = sys.modules.get(name)
file = self._get_module_file(module)
if file:
date = self._import_dates.get(file)
new_date = None
reload_mod = False
mod_to_pack = False # Module turning into a package? (special case)
try:
new_date = os.path.getmtime(file)
except:
self._import_dates.pop(file, None) # Clean up
# Handle module changing in package and
#package changing in module:
if file.endswith(".py"):
# Get path without file ext:
file = os.path.splitext(file)[0]
reload_mod = os.path.isdir(file) \
and os.path.isfile(file + self.PACKAGE_PATH_SUFFIX)
mod_to_pack = reload_mod
else: # Package turning into module?
file += ".py"
reload_mod = os.path.isfile(file)
if reload_mod:
new_date = os.path.getmtime(file) # Refresh file date
if reload_mod or not date or new_date > date:
self._import_dates[file] = new_date
if reload_mod or (date and new_date > date):
if module not in self.THREAD_LOCAL._modules_loaded:
if mod_to_pack:
# Module turning into a package:
mod_name = module.__name__
del sys.modules[mod_name] # Delete the module
# Reload the module:
NATIVE_IMPORTER(mod_name, globals, locals, [], level)
else:
reload(module)
self.THREAD_LOCAL._modules_loaded.add(module)
def _get_module_file(self, module):
"""
Get the absolute path file associated to the module or None.
"""
file = getattr(module, "__file__", None)
if file:
# Make path absolute if not:
file = os.path.splitext(file)[0] + ".py" # Change .pyc for .py
if file.endswith(self.PACKAGE_PATH_SUFFIX):
file = os.path.dirname(file) # Track dir for packages
return file
TRACK_IMPORTER = TrackImporter()
| mit | -1,467,487,067,982,207,000 | 37.139303 | 87 | 0.535655 | false |
BinPy/BinPy | BinPy/examples/source/Gates/XNOR.py | 4 | 1114 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Examples for XNOR class.
# <codecell>
from __future__ import print_function
from BinPy.gates import *
# <codecell>
# Initializing the XNOR class
gate = XNOR(0, 1)
# <codecell>
# Output of the XNOR gate
print (gate.output())
# <codecell>
# Input changes
# Input at index 1 is changed to 0
gate.set_input(1, 0)
# <codecell>
# New Output of the XNOR gate
print (gate.output())
# <codecell>
# Changing the number of inputs
# No need to set the number, just change the inputs
gate.set_inputs(1, 1, 1, 1)
# <codecell>
# To get the input states
print (gate.get_input_states())
# <codecell>
# New output of the XNOR gate
print (gate.output())
# <codecell>
# Using Connectors as the input lines
# Take a Connector
conn = Connector()
# <codecell>
# Set Output of gate to Connector conn
gate.set_output(conn)
# <codecell>
# Put this connector as the input to gate1
gate1 = XNOR(conn, 0)
# <codecell>
# Output of the gate1
print (gate1.output())
# <codecell>
# Information about gate instance
print (gate)
| bsd-3-clause | 6,761,703,741,280,542,000 | 11.516854 | 51 | 0.67684 | false |
BNIA/tidydate | src/modules/tidyall.py | 2 | 3960 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tidyall
"""
from os import remove
import sys
from textwrap import dedent
import pandas as pd
import numpy as np
from .settings import VALID_COLS
from .tidybnl import TidyBlockNLot
from .tidydate import TidyDate
class TidyAll(object):
def __init__(self, file_path, debug=False):
"""Constructs a TidyAll object by creating a dataframe from the input
file
Args:
file_path (`str`): path of the uploaded dataset
Returns:
None
"""
self.file_path = file_path
self.debug = debug
self.file_name = ""
self.df = self.to_df()
self.column = {}
self.options = []
def __del__(self):
"""Destructor to remove the uploaded file after conversion
Args:
None
Returns:
None
"""
if not self.debug:
remove(self.file_path)
def to_df(self):
"""Converts the input file into a Pandas Dataframe
Args:
file_path (`str`): path of the uploaded dataset
Return:
file_name (`str`): name of the input file
(`obj: pandas.Dataframe`): dataframe of the file
"""
self.file_name, ext = self.file_path.rsplit('.', 1)
if ext == "csv":
return pd.read_csv(self.file_path)
elif ext == "xlsx":
return pd.read_excel(self.file_path)
sys.exit("Only CSV and Excel files are supported")
def get_cols(self):
"""Returns the columns found in the input file
Args:
None
Returns:
(`list` of `str`): column names of dataframe
"""
return set(self.df)
def set_col(self, column):
"""Set the date column to be parsed
Args:
column (`str`): column name
Returns:
None
"""
column_vals = column.values()
if len(set(column_vals) & self.get_cols()) \
== len(set(column_vals)):
self.column = column
else:
possible_cols = ", ".join(
[col for col in list(self.df) if any(
x in col.lower() for x in VALID_COLS)]
)
sys.exit(
dedent(
("Inputted columns ({wrong_col}) do not exist.\n"
"Possible columns are:\n"
"{cols}".format(
wrong_col=", ".join(column_vals),
cols=possible_cols
)
)
)
)
def set_opt(self, options=["date"]):
self.options = options
if "date" in self.options:
tidydate_obj = TidyDate(self.df, self.column["date"])
self.df = tidydate_obj.parse()
if "blocknlot" in self.options:
blocknlot_col = {}
for key, value in self.column.items():
if key != "date":
blocknlot_col[key] = value
tidyblocknlot_obj = TidyBlockNLot(self.df, blocknlot_col)
self.df = tidyblocknlot_obj.parse()
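    # Typical call sequence (sketch, hypothetical file and column names):
    #   t = TidyAll('sales.csv')
    #   t.set_col({'date': 'sale_date'})
    #   t.set_opt(['date'])
    #   changed = t.download()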
def download(self):
"""Initializes the parsing and cleaning procedure, and then saves the
dataframe as a CSV file
Args:
None
Returns:
            `True` if every tidied column differs somewhere from its
            source column (i.e. tidying changed values)
"""
new_file = self.file_name + "_tidy.csv"
self.df.to_csv(new_file, encoding="utf-8", index=False)
match_sets = []
for key, value in self.column.items():
match_sets.append(key)
match_sets.append(
set(
np.where(
self.df[value] == self.df["tidy_" + key],
True, False
)
)
)
return {True} not in match_sets
| mit | -5,601,075,572,408,982,000 | 23 | 77 | 0.490152 | false |
rwaldron/depthjs | old/backend/tornado/simple_httpclient.py | 1 | 12056 | #!/usr/bin/env python
from __future__ import with_statement
from cStringIO import StringIO
from tornado.httpclient import HTTPRequest, HTTPResponse, HTTPError
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado import stack_context
import collections
import contextlib
import errno
import functools
import logging
import re
import socket
import time
import urlparse
import weakref
import zlib
try:
import ssl # python 2.6+
except ImportError:
ssl = None
class SimpleAsyncHTTPClient(object):
"""Non-blocking HTTP client with no external dependencies.
WARNING: This class is still in development and not yet recommended
for production use.
This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
It does not currently implement all applicable parts of the HTTP
specification, but it does enough to work with major web service APIs
(mostly tested against the Twitter API so far).
Many features found in the curl-based AsyncHTTPClient are not yet
implemented. The currently-supported set of parameters to HTTPRequest
are url, method, headers, body, streaming_callback, and header_callback.
Connections are not reused, and no attempt is made to limit the number
of outstanding requests.
Python 2.6 or higher is required for HTTPS support. Users of Python 2.5
should use the curl-based AsyncHTTPClient if HTTPS support is required.
"""
_ASYNC_CLIENTS = weakref.WeakKeyDictionary()
def __new__(cls, io_loop=None, max_clients=10,
max_simultaneous_connections=None,
force_instance=False):
"""Creates a SimpleAsyncHTTPClient.
Only a single SimpleAsyncHTTPClient instance exists per IOLoop
in order to provide limitations on the number of pending connections.
force_instance=True may be used to suppress this behavior.
max_clients is the number of concurrent requests that can be in
progress. max_simultaneous_connections has no effect and is accepted
only for compatibility with the curl-based AsyncHTTPClient. Note
that these arguments are only used when the client is first created,
and will be ignored when an existing client is reused.
"""
io_loop = io_loop or IOLoop.instance()
if io_loop in cls._ASYNC_CLIENTS and not force_instance:
return cls._ASYNC_CLIENTS[io_loop]
else:
instance = super(SimpleAsyncHTTPClient, cls).__new__(cls)
instance.io_loop = io_loop
instance.max_clients = max_clients
instance.queue = collections.deque()
instance.active = {}
if not force_instance:
cls._ASYNC_CLIENTS[io_loop] = instance
return instance
def close(self):
pass
def fetch(self, request, callback, **kwargs):
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
if not isinstance(request.headers, HTTPHeaders):
request.headers = HTTPHeaders(request.headers)
callback = stack_context.wrap(callback)
self.queue.append((request, callback))
self._process_queue()
if self.queue:
logging.debug("max_clients limit reached, request queued. "
"%d active, %d queued requests." % (
len(self.active), len(self.queue)))
def _process_queue(self):
with stack_context.NullContext():
while self.queue and len(self.active) < self.max_clients:
request, callback = self.queue.popleft()
key = object()
self.active[key] = (request, callback)
_HTTPConnection(self.io_loop, request,
functools.partial(self._on_fetch_complete,
key, callback))
def _on_fetch_complete(self, key, callback, response):
del self.active[key]
callback(response)
self._process_queue()
class _HTTPConnection(object):
_SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE"])
def __init__(self, io_loop, request, callback):
self.start_time = time.time()
self.io_loop = io_loop
self.request = request
self.callback = callback
self.code = None
self.headers = None
self.chunks = None
self._decompressor = None
# Timeout handle returned by IOLoop.add_timeout
self._timeout = None
with stack_context.StackContext(self.cleanup):
parsed = urlparse.urlsplit(self.request.url)
if ":" in parsed.netloc:
host, _, port = parsed.netloc.partition(":")
port = int(port)
else:
host = parsed.netloc
port = 443 if parsed.scheme == "https" else 80
if parsed.scheme == "https":
# TODO: cert verification, etc
self.stream = SSLIOStream(socket.socket(),
io_loop=self.io_loop)
else:
self.stream = IOStream(socket.socket(),
io_loop=self.io_loop)
timeout = min(request.connect_timeout, request.request_timeout)
if timeout:
                self._timeout = self.io_loop.add_timeout(
self.start_time + timeout,
self._on_timeout)
self.stream.connect((host, port),
functools.partial(self._on_connect, parsed))
def _on_timeout(self):
self._timeout = None
self.stream.close()
if self.callback is not None:
self.callback(HTTPResponse(self.request, 599,
error=HTTPError(599, "Timeout")))
self.callback = None
def _on_connect(self, parsed):
if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
self._timeout = None
if self.request.request_timeout:
self._timeout = self.io_loop.add_timeout(
self.start_time + self.request.request_timeout,
self._on_timeout)
if (self.request.method not in self._SUPPORTED_METHODS and
not self.request.allow_nonstandard_methods):
raise KeyError("unknown method %s" % self.request.method)
if self.request.network_interface:
raise NotImplementedError(
"network interface selection not supported")
if "Host" not in self.request.headers:
self.request.headers["Host"] = parsed.netloc
if self.request.auth_username:
auth = "%s:%s" % (self.request.auth_username,
self.request.auth_password)
self.request.headers["Authorization"] = ("Basic %s" %
auth.encode("base64"))
if self.request.user_agent:
self.request.headers["User-Agent"] = self.request.user_agent
has_body = self.request.method in ("POST", "PUT")
if has_body:
assert self.request.body is not None
self.request.headers["Content-Length"] = len(
self.request.body)
else:
assert self.request.body is None
if (self.request.method == "POST" and
"Content-Type" not in self.request.headers):
self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
if self.request.use_gzip:
self.request.headers["Accept-Encoding"] = "gzip"
req_path = ((parsed.path or '/') +
(('?' + parsed.query) if parsed.query else ''))
request_lines = ["%s %s HTTP/1.1" % (self.request.method,
req_path)]
for k, v in self.request.headers.get_all():
request_lines.append("%s: %s" % (k, v))
self.stream.write("\r\n".join(request_lines) + "\r\n\r\n")
if has_body:
self.stream.write(self.request.body)
self.stream.read_until("\r\n\r\n", self._on_headers)
@contextlib.contextmanager
def cleanup(self):
try:
yield
except Exception, e:
logging.warning("uncaught exception", exc_info=True)
if self.callback is not None:
self.callback(HTTPResponse(self.request, 599, error=e))
self.callback = None
def _on_headers(self, data):
first_line, _, header_data = data.partition("\r\n")
match = re.match("HTTP/1.[01] ([0-9]+) .*", first_line)
assert match
self.code = int(match.group(1))
self.headers = HTTPHeaders.parse(header_data)
if self.request.header_callback is not None:
for k, v in self.headers.get_all():
self.request.header_callback("%s: %s\r\n" % (k, v))
if (self.request.use_gzip and
self.headers.get("Content-Encoding") == "gzip"):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
self._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
if self.headers.get("Transfer-Encoding") == "chunked":
self.chunks = []
self.stream.read_until("\r\n", self._on_chunk_length)
elif "Content-Length" in self.headers:
self.stream.read_bytes(int(self.headers["Content-Length"]),
self._on_body)
else:
raise Exception("No Content-length or chunked encoding, "
"don't know how to read %s", self.request.url)
def _on_body(self, data):
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
if self._decompressor:
data = self._decompressor.decompress(data)
if self.request.streaming_callback:
if self.chunks is None:
# if chunks is not None, we already called streaming_callback
# in _on_chunk_data
self.request.streaming_callback(data)
buffer = StringIO()
else:
buffer = StringIO(data) # TODO: don't require one big string?
response = HTTPResponse(self.request, self.code, headers=self.headers,
buffer=buffer)
self.callback(response)
self.callback = None
def _on_chunk_length(self, data):
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
length = int(data.strip(), 16)
if length == 0:
# all the data has been decompressed, so we don't need to
# decompress again in _on_body
self._decompressor = None
self._on_body(''.join(self.chunks))
else:
self.stream.read_bytes(length + 2, # chunk ends with \r\n
self._on_chunk_data)
def _on_chunk_data(self, data):
assert data[-2:] == "\r\n"
chunk = data[:-2]
if self._decompressor:
chunk = self._decompressor.decompress(chunk)
if self.request.streaming_callback is not None:
self.request.streaming_callback(chunk)
else:
self.chunks.append(chunk)
self.stream.read_until("\r\n", self._on_chunk_length)
def main():
from tornado.options import define, options, parse_command_line
args = parse_command_line()
client = SimpleAsyncHTTPClient()
io_loop = IOLoop.instance()
for arg in args:
def callback(response):
io_loop.stop()
response.rethrow()
print response.body
client.fetch(arg, callback)
io_loop.start()
if __name__ == "__main__":
main()
| agpl-3.0 | 6,752,048,948,973,244,000 | 40.006803 | 101 | 0.587094 | false |
robbert-harms/mri-tools | mri_tools/plots/layouts.py | 1 | 2237 | import itertools
import math
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
__author__ = 'Robbert Harms'
__date__ = "2015-11-06"
__maintainer__ = "Robbert Harms"
__email__ = "[email protected]"
class GridLayout(object):
def __init__(self):
self.spacings = dict(left=0.04, right=0.96, top=0.95, bottom=0.07)
def get_axis(self, index, nmr_plots):
"""Get the axis for the subplot at the given index in the data list.
Args:
index (int): the index of the subplot in the list of plots
nmr_plots (int): the total number of plots
Returns:
axis: a matplotlib axis object that can be drawn on
"""
class SquareGridLayout(GridLayout):
def get_axis(self, index, nmr_plots):
rows, cols = self._get_row_cols_square(nmr_plots)
grid = GridSpec(rows, cols, **self.spacings)
return plt.subplot(grid[index])
def _get_row_cols_square(self, nmr_plots):
        # Hard-coded layouts for up to six plots; larger counts fall through
        # to the computed layout below.
        defaults = ((1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (2, 3))
        if nmr_plots <= len(defaults):
return defaults[nmr_plots - 1]
else:
cols = math.ceil(nmr_plots / 3.0)
rows = math.ceil(float(nmr_plots) / cols)
rows = int(rows)
cols = int(cols)
return rows, cols
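    # Worked example (illustrative, not from the original source): for 8
    # plots the defaults no longer apply, so cols = ceil(8 / 3.0) = 3 and
    # rows = ceil(8 / 3.0) = 3, giving a 3x3 grid with one empty cell.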
class LowerTriangleGridLayout(GridLayout):
def __init__(self, size):
super(LowerTriangleGridLayout, self).__init__()
self._size = size
self._positions = []
for y, x in itertools.product(range(self._size), range(self._size)):
if x >= y:
self._positions.append(x * self._size + y)
def get_axis(self, index, nmr_plots):
grid = GridSpec(self._size, self._size, **self.spacings)
return plt.subplot(grid[self._positions[index]])
class SingleColumnGridLayout(GridLayout):
def get_axis(self, index, nmr_plots):
grid = GridSpec(nmr_plots, 1, **self.spacings)
return plt.subplot(grid[index])
class SingleRowGridLayout(GridLayout):
def get_axis(self, index, nmr_plots):
grid = GridSpec(1, nmr_plots, **self.spacings)
return plt.subplot(grid[index])
| bsd-3-clause | -8,466,810,770,695,110,000 | 28.826667 | 76 | 0.602593 | false |
frank-y-liu/gatk | src/main/resources/org/broadinstitute/hellbender/tools/walkers/vqsr/training.py | 2 | 46416 | # Imports
import os
import sys
import vcf
import math
import h5py
import time
import pysam
import vqsr_cnn
import numpy as np
from Bio import Seq, SeqIO
from collections import Counter
# Keras Imports
import keras.backend as K
def run():
args = vqsr_cnn.parse_args()
if 'write_reference_and_annotation_tensors' == args.mode:
write_reference_and_annotation_tensors(args)
elif 'write_read_and_annotation_tensors' == args.mode:
write_read_and_annotation_tensors(args)
elif 'train_on_reference_tensors_and_annotations' == args.mode:
train_on_reference_tensors_and_annotations(args)
elif 'train_on_read_tensors_and_annotations' == args.mode:
train_on_read_tensors_and_annotations(args)
elif 'train_tiny_model_on_read_tensors_and_annotations' == args.mode:
train_tiny_model_on_read_tensors_and_annotations(args)
elif 'train_small_model_on_read_tensors_and_annotations' == args.mode:
train_small_model_on_read_tensors_and_annotations(args)
else:
raise ValueError('Unknown training mode:', args.mode)
def write_reference_and_annotation_tensors(args, include_dna=True, include_annotations=True):
if not args.tensor_name in vqsr_cnn.TENSOR_MAPS_1D:
raise ValueError('Unknown tensor name:', args.tensor_name, '1d maps must be in:', str(vqsr_cnn.TENSOR_MAPS_1D))
record_dict = SeqIO.to_dict(SeqIO.parse(args.reference_fasta, "fasta"))
if os.path.splitext(args.input_vcf)[-1].lower() == '.gz':
vcf_reader = vcf.Reader(open(args.input_vcf, 'rb'))
else:
vcf_reader = vcf.Reader(open(args.input_vcf, 'r'))
if os.path.splitext(args.train_vcf)[-1].lower() == '.gz':
vcf_ram = vcf.Reader(open(args.train_vcf, 'rb'))
else:
vcf_ram = vcf.Reader(open(args.train_vcf, 'r'))
bed_dict = bed_file_to_dict(args.bed_file)
stats = Counter()
if args.chrom:
variants = vcf_reader.fetch(args.chrom, args.start_pos, args.end_pos)
else:
variants = vcf_reader
for variant in variants:
for allele_idx, allele in enumerate(variant.ALT):
idx_offset, ref_start, ref_end = get_variant_window(args, variant)
contig = record_dict[variant.CHROM]
record = contig[variant.POS-idx_offset: variant.POS+idx_offset]
cur_label_key = get_true_label(allele, variant, bed_dict, vcf_ram, stats)
if not cur_label_key or downsample(args, cur_label_key, stats):
continue
if include_annotations:
if all(map(
lambda x: x not in variant.INFO and x not in variant.FORMAT and x != "QUAL", args.annotations)):
stats['Missing ALL annotations'] += 1
continue # Require at least 1 annotation...
annotation_data = get_annotation_data(args, variant, stats)
if include_dna:
dna_data = np.zeros( (args.window_size, len(vqsr_cnn.DNA_SYMBOLS)) )
for i,b in enumerate(record.seq):
if b in vqsr_cnn.DNA_SYMBOLS:
dna_data[i, vqsr_cnn.DNA_SYMBOLS[b]] = 1.0
elif b in vqsr_cnn.AMBIGUITY_CODES:
dna_data[i] = vqsr_cnn.AMBIGUITY_CODES[b]
else:
raise ValueError('Error! Unknown code:', b)
tp = get_path_to_train_valid_or_test(args.data_dir)
tp += cur_label_key +'/'+ plain_name(args.input_vcf) +'_'+ plain_name(args.train_vcf)
tp += '_allele_' + str(allele_idx) +'-'+ variant.CHROM +'_'+ str(variant.POS) + vqsr_cnn.TENSOR_SUFFIX
if not os.path.exists(os.path.dirname(tp)):
os.makedirs(os.path.dirname(tp))
with h5py.File(tp, 'w') as hf:
if include_annotations:
hf.create_dataset(args.annotation_set, data=annotation_data, compression='gzip')
if include_dna:
hf.create_dataset(args.tensor_name, data=dna_data, compression='gzip')
stats[cur_label_key] += 1
stats['count'] += 1
if stats['count']%500==0:
print('Wrote', stats['count'], 'out of:', args.samples, 'Last variant:', variant)
if args.samples <= stats['count']:
break
print('Done Writing 1D Tensors. Tensor Map:', args.tensor_name, ' Annotation set:', args.annotation_set)
for k in stats.keys():
print(k, ' has:', stats[k])
def write_read_and_annotation_tensors(args, include_annotations=True, pileup=False):
    '''Create read tensors, laid out by the tensor channel map and organized by label, in the data directory.
Defines true variants as those in the args.train_vcf, defines false variants as
those called in args.input_vcf and in the args.bed_file high confidence intervals,
but not in args.train_vcf.
Arguments
args.data_dir: directory where tensors will live. Created here and filled with
subdirectories of test, valid and train, each containing
subdirectories for each label with tensors stored as hd5 files.
args.bam_file: BAM or BAMout file where the aligned reads are stored
args.input_vcf: VCF file with annotation values from Haplotype caller or VQSR
args.train_vcf: VCF file with true variant (from NIST or Platinum genomes, etc.)
args.bed_file: High confidence intervals for the calls in args.train_vcf
args.window_size: Size of sequence window around variant (width of the tensor)
args.read_limit: Maximum number of reads to include (height of the tensor)
args.chrom: Only write tensors from this chromosome (optional, used for parallelization)
args.start_pos: Only write tensors after this position (optional, used for parallelization)
args.end_pos: Only write tensors before this position (optional, used for parallelization)
'''
print('Writing tensors with:', args.tensor_name, 'channel map.')
stats = Counter()
samfile = pysam.AlignmentFile(args.bam_file, "rb")
bed_dict = bed_file_to_dict(args.bed_file)
record_dict = SeqIO.to_dict(SeqIO.parse(args.reference_fasta, "fasta"))
vcf_reader = vcf.Reader(open(args.input_vcf, 'r'))
vcf_ram = vcf.Reader(open(args.train_vcf, 'rb'))
if args.chrom:
variants = vcf_reader.fetch(args.chrom, args.start_pos, args.end_pos)
else:
variants = vcf_reader
for variant in variants:
for allele_idx, allele in enumerate(variant.ALT):
idx_offset, ref_start, ref_end = get_variant_window(args, variant)
contig = record_dict[variant.CHROM]
record = contig[ ref_start : ref_end ]
cur_label_key = get_true_label(allele, variant, bed_dict, vcf_ram, stats)
if not cur_label_key or downsample(args, cur_label_key, stats):
continue
if include_annotations:
if all(map(
lambda x: x not in variant.INFO and x not in variant.FORMAT and x != "QUAL", args.annotations)):
stats['Missing ALL annotations'] += 1
continue # Require at least 1 annotation...
annotation_data = get_annotation_data(args, variant, stats)
good_reads, insert_dict = get_good_reads(args, samfile, variant)
if len(good_reads) >= args.read_limit:
stats['More reads than read_limit'] += 1
if len(good_reads) == 0:
stats['No reads aligned'] += 1
continue
reference_seq = record.seq
for i in sorted(insert_dict.keys(), key=int, reverse=True):
if i < 0:
reference_seq = vqsr_cnn.INDEL_CHAR*insert_dict[i] + reference_seq
else:
reference_seq = reference_seq[:i] + vqsr_cnn.INDEL_CHAR*insert_dict[i] + reference_seq[i:]
read_tensor = good_reads_to_tensor(args, good_reads, ref_start, insert_dict)
reference_sequence_into_tensor(args, reference_seq, read_tensor)
tensor_path = get_path_to_train_valid_or_test(args.data_dir)
tensor_prefix = plain_name(args.input_vcf) +'_'+ plain_name(args.train_vcf)
tensor_prefix += '_allele_' + str(allele_idx) + '-' + cur_label_key
tensor_path += cur_label_key + '/' + tensor_prefix + '-' + variant.CHROM
tensor_path += '_' + str(variant.POS) + vqsr_cnn.TENSOR_SUFFIX
stats[cur_label_key] += 1
if not os.path.exists(os.path.dirname(tensor_path)):
os.makedirs(os.path.dirname(tensor_path))
with h5py.File(tensor_path, 'w') as hf:
if pileup:
pileup_tensor = read_tensor_to_pileup(args, read_tensor)
hf.create_dataset('pileup_tensor', data=pileup_tensor, compression='gzip')
hf.create_dataset(args.tensor_name, data=read_tensor, compression='gzip')
if include_annotations:
hf.create_dataset(args.annotation_set, data=annotation_data, compression='gzip')
stats['count'] += 1
if stats['count']%100 == 0:
print('Wrote', stats['count'], 'tensors out of', args.samples, ' last variant:', str(variant))
if stats['count'] >= args.samples:
break
for s in stats.keys():
print(s, 'has:', stats[s])
if variant:
print('Done generating tensors. Last variant:', str(variant), 'from vcf:', args.input_vcf)
def train_on_reference_tensors_and_annotations(args):
    '''Train a model combining a 1D convolution over reference tracks with an MLP over variant annotations.
Arguments:
args.data_dir: must be set to an appropriate directory with
subdirectories of test, valid and train, each containing
subdirectories for each label with tensors stored as hd5 files.
Reference and Annotation tensors must be generated by calling
write_reference_and_annotation_tensors() before this function is used.
Performance curves for CNN are plotted on the test dataset.
'''
train_paths, valid_paths, test_paths = get_train_valid_test_paths(args)
generate_train = dna_annotation_generator(args, train_paths)
generate_valid = dna_annotation_generator(args, valid_paths)
weight_path = vqsr_cnn.weight_path_from_args(args)
model = vqsr_cnn.build_reference_annotation_model(args)
model = vqsr_cnn.train_model_from_generators(args, model, generate_train, generate_valid, weight_path)
test = load_dna_annotations_positions_from_class_dirs(args, test_paths, per_class_max=args.samples)
if args.image_dir:
vqsr_cnn.plot_roc_per_class(model, [test[0], test[1]], test[2], args.labels, args.id, prefix=args.image_dir)
def train_on_read_tensors_and_annotations(args):
'''Trains a reference, read, and annotation CNN architecture on tensors at the supplied data directory.
This architecture looks at reads, read flags, reference sequence, and variant annotations.
Tensors must be generated by calling write_read_and_annotation_tensors() before this function is used.
After training with early stopping performance curves are plotted on the test dataset.
Arguments:
args.data_dir: must be set to an appropriate directory with
subdirectories of test, valid and train, each containing
subdirectories for each label with tensors stored as hd5 files.
'''
train_paths, valid_paths, test_paths = get_train_valid_test_paths(args)
generate_train = tensor_generator_from_label_dirs_and_args(args, train_paths)
generate_valid = tensor_generator_from_label_dirs_and_args(args, valid_paths)
weight_path = vqsr_cnn.weight_path_from_args(args)
model = vqsr_cnn.build_read_tensor_2d_and_annotations_model(args)
model = vqsr_cnn.train_model_from_generators(args, model, generate_train, generate_valid, weight_path)
test = load_tensors_and_annotations_from_class_dirs(args, test_paths, per_class_max=args.samples)
if args.image_dir:
vqsr_cnn.plot_roc_per_class(model, [test[0], test[1]], test[2], args.labels, args.id,
prefix=args.image_dir, batch_size=args.batch_size)
def train_tiny_model_on_read_tensors_and_annotations(args):
'''Trains a reference, read, and annotation CNN architecture on tensors at the supplied data directory.
This architecture looks at reads, read flags, reference sequence, and variant annotations.
Tensors must be generated by calling write_read_and_annotation_tensors() before this function is used.
After training with early stopping performance curves are plotted on the test dataset.
Arguments:
args.data_dir: must be set to an appropriate directory with
subdirectories of test, valid and train, each containing
subdirectories for each label with tensors stored as hd5 files.
'''
train_paths, valid_paths, test_paths = get_train_valid_test_paths(args)
generate_train = tensor_generator_from_label_dirs_and_args(args, train_paths)
generate_valid = tensor_generator_from_label_dirs_and_args(args, valid_paths)
weight_path = vqsr_cnn.weight_path_from_args(args)
model = vqsr_cnn.build_tiny_2d_annotation_model(args)
model = vqsr_cnn.train_model_from_generators(args, model, generate_train, generate_valid, weight_path)
test = load_tensors_and_annotations_from_class_dirs(args, test_paths, per_class_max=args.samples)
if args.image_dir:
vqsr_cnn.plot_roc_per_class(model, [test[0], test[1]], test[2], args.labels, args.id,
prefix=args.image_dir, batch_size=args.batch_size)
def train_small_model_on_read_tensors_and_annotations(args):
'''Trains a reference, read, and annotation CNN architecture on tensors at the supplied data directory.
This architecture looks at reads, read flags, reference sequence, and variant annotations.
Tensors must be generated by calling write_read_and_annotation_tensors() before this function is used.
After training with early stopping performance curves are plotted on the test dataset.
Arguments:
args.data_dir: must be set to an appropriate directory with
subdirectories of test, valid and train, each containing
subdirectories for each label with tensors stored as hd5 files.
'''
train_paths, valid_paths, test_paths = get_train_valid_test_paths(args)
generate_train = tensor_generator_from_label_dirs_and_args(args, train_paths)
generate_valid = tensor_generator_from_label_dirs_and_args(args, valid_paths)
weight_path = vqsr_cnn.weight_path_from_args(args)
model = vqsr_cnn.build_small_2d_annotation_model(args)
model = vqsr_cnn.train_model_from_generators(args, model, generate_train, generate_valid, weight_path)
test = load_tensors_and_annotations_from_class_dirs(args, test_paths, per_class_max=args.samples)
if args.image_dir:
vqsr_cnn.plot_roc_per_class(model, [test[0], test[1]], test[2], args.labels, args.id,
prefix=args.image_dir, batch_size=args.batch_size)
def get_annotation_data(args, annotation_variant, stats):
'''Return an array annotation data about the variant.
Arguments:
args.annotations: List of variant annotations to use
annotation_variant: the variant with annotation
stats: Counter of run statistics
Returns:
annotation_data: numpy array of annotation values
'''
annotation_data = np.zeros((len(args.annotations),))
for i, a in enumerate(args.annotations):
if a == 'QUAL':
annotation_data[i] = annotation_variant.QUAL
elif a == 'AF':
annotation_data[i] = annotation_variant.INFO[a][0]
elif a in annotation_variant.INFO and not math.isnan(annotation_variant.INFO[a]):
annotation_data[i] = annotation_variant.INFO[a]
elif a == 'MBQ':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.MBQ
elif a == 'MPOS':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.MPOS
elif a == 'MMQ':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.MMQ
elif a == 'MFRL_0':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.MFRL[0]
elif a == 'MFRL_1':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.MFRL[1]
elif a == 'AD_0':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.AD[0]
elif a == 'AD_1':
call = annotation_variant.genotype(args.sample_name)
annotation_data[i] = call.data.AD[1]
else:
stats['Could not find annotation:' + a] += 1
return annotation_data
def get_good_reads(args, samfile, variant, sort_by='base'):
'''Return an array of usable reads centered at the variant.
Ignores artificial haplotype read group.
Relies on pysam's cigartuples structure see: http://pysam.readthedocs.io/en/latest/api.html
Match, M -> 0
Insert, I -> 1
Deletion, D -> 2
Ref Skip, N -> 3
Soft Clip, S -> 4
Arguments:
args.read_limit: maximum number of reads to return
samfile: the BAM (or BAMout) file
variant: the variant around which reads will load
Returns:
good_reads: array of usable reads sorted by reference start position
insert_dict: a dict mapping read indices to max insertions at that point
'''
good_reads = []
insert_dict = {}
idx_offset, ref_start, ref_end = get_variant_window(args, variant)
for read in samfile.fetch(variant.CHROM, variant.POS-1, variant.POS+1):
if not read or not hasattr(read, 'cigarstring') or read.cigarstring is None:
continue
read_group = read.get_tag('RG')
if 'artificial' in read_group.lower():
continue
index_dif = ref_start - read.reference_start
if abs(index_dif) >= args.window_size:
continue
if 'I' in read.cigarstring:
cur_idx = 0
for t in read.cigartuples:
if t[0] == vqsr_cnn.CIGAR_CODE['I']:
insert_idx = cur_idx - index_dif
if insert_idx not in insert_dict:
insert_dict[insert_idx] = t[1]
elif insert_dict[insert_idx] < t[1]:
insert_dict[insert_idx] = t[1]
if t[0] in [vqsr_cnn.CIGAR_CODE['M'], vqsr_cnn.CIGAR_CODE['I'],
vqsr_cnn.CIGAR_CODE['S'], vqsr_cnn.CIGAR_CODE['D']]:
cur_idx += t[1]
good_reads.append(read)
if len(good_reads) > args.read_limit:
good_reads = np.random.choice(good_reads, size=args.read_limit, replace=False).tolist()
good_reads.sort(key=lambda x: x.reference_start + x.query_alignment_start)
if sort_by == 'base':
good_reads.sort(key=lambda read: get_base_to_sort_by(read, variant))
return good_reads, insert_dict
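# Illustrative example (not part of the original source): for a read with
# CIGAR string "3M1I4M", pysam reports cigartuples == [(0, 3), (1, 1), (0, 4)].
# Assuming index_dif == 0, the insertion is recorded as insert_dict[3] = 1,
# i.e. a 1-base insertion three reference bases into the window.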
def get_base_to_sort_by(read, variant):
if len(read.query_alignment_sequence) > 0:
max_idx = len(read.query_alignment_sequence)-1
else:
        return 'Z'  # no aligned bases: sorts after A/C/G/T and CIGAR codes
if variant.is_snp:
return read.query_alignment_sequence[clamp((variant.POS-read.reference_start)-1, 0, max_idx)]
elif variant.is_indel:
var_idx = variant.POS-read.reference_start
cur_idx = 0
for cur_op, length in read.cigartuples:
cur_idx += length
if cur_idx > var_idx:
if cur_op == vqsr_cnn.CIGAR_CODE['M']:
return read.query_alignment_sequence[clamp(var_idx, 0, max_idx)]
else:
return vqsr_cnn.CODE2CIGAR[cur_op]
    return 'Y'  # variant position lies past the read's aligned bases
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
def good_reads_to_tensor(args, good_reads, ref_start, insert_dict):
'''Create a read tensor based on a tensor channel map.
Assumes read pairs have the same name.
Only loads reads that might align inside the tensor.
Arguments:
args.read_limit: maximum number of reads to return
good_reads: list of reads to make arrays from
ref_start: the beginning of the window in reference coordinates
insert_dict: a dict mapping read indices to max insertions at that point.
Returns:
tensor: 3D read tensor.
'''
channel_map = vqsr_cnn.get_tensor_channel_map_from_args(args)
tensor = np.zeros( vqsr_cnn.tensor_shape_from_args(args) )
for j,read in enumerate(good_reads):
rseq, rqual = sequence_and_qualities_from_read(args, read, ref_start, insert_dict)
flag_start = -1
flag_end = 0
for i,b in enumerate(rseq):
if i == args.window_size:
break
if b == vqsr_cnn.SKIP_CHAR:
continue
elif flag_start == -1:
flag_start = i
else:
flag_end = i
if b in args.input_symbols:
if b == vqsr_cnn.INDEL_CHAR:
if K.image_data_format() == 'channels_last':
tensor[j, i, args.input_symbols[b]] = 1.0
else:
tensor[args.input_symbols[b], j, i] = 1.0
else:
hot_array = quality_from_mode(args, rqual[i], b, args.input_symbols)
if K.image_data_format() == 'channels_last':
tensor[j, i, :4] = hot_array
else:
tensor[:4, j, i] = hot_array
elif b in vqsr_cnn.AMBIGUITY_CODES:
if K.image_data_format() == 'channels_last':
tensor[j, i, :4] = vqsr_cnn.AMBIGUITY_CODES[b]
else:
tensor[:4, j, i] = vqsr_cnn.AMBIGUITY_CODES[b]
else:
print('Error! Unknown symbol in seq block:', b)
return
flags = flag_to_array(read.flag)
for i in range(vqsr_cnn.READ_FLAGS):
flag_str = 'flag_bit_'+ str(i)
if flags[i] and flag_str in channel_map:
if K.image_data_format() == 'channels_last':
tensor[j, flag_start:flag_end, channel_map[flag_str]] = 1.0
else:
tensor[channel_map[flag_str], j, flag_start:flag_end] = 1.0
if 'mapping_quality' in channel_map:
mq = float(read.mapping_quality)/vqsr_cnn.MAPPING_QUALITY_MAX
if K.image_data_format() == 'channels_last':
tensor[j, flag_start:flag_end, channel_map['mapping_quality']] = mq
else:
tensor[channel_map['mapping_quality'], j, flag_start:flag_end] = mq
return tensor
def sequence_and_qualities_from_read(args, read, ref_start, insert_dict):
cur_idx = 0
my_indel_dict = {}
no_qual_filler = 0
index_dif = ref_start - read.reference_start
for t in read.cigartuples:
my_ref_idx = cur_idx - index_dif
if t[0] == vqsr_cnn.CIGAR_CODE['I'] and my_ref_idx in insert_dict:
my_indel_dict[my_ref_idx] = insert_dict[my_ref_idx] - t[1]
elif t[0] == vqsr_cnn.CIGAR_CODE['D']:
my_indel_dict[my_ref_idx] = t[1]
if t[0] in [vqsr_cnn.CIGAR_CODE['M'], vqsr_cnn.CIGAR_CODE['I'],
vqsr_cnn.CIGAR_CODE['S'], vqsr_cnn.CIGAR_CODE['D']]:
cur_idx += t[1]
for k in insert_dict.keys():
if k not in my_indel_dict:
my_indel_dict[k] = insert_dict[k]
rseq = read.query_alignment_sequence[:args.window_size]
rqual = read.query_alignment_qualities[:args.window_size].tolist()
if index_dif > 0:
rseq = rseq[index_dif:]
rqual = rqual[index_dif:]
elif index_dif < 0:
rseq = vqsr_cnn.SKIP_CHAR*(-index_dif) + rseq
rqual = [no_qual_filler]*(-index_dif) + rqual
for j in sorted(my_indel_dict.keys(), key=int, reverse=True):
if j < 1:
rseq = (vqsr_cnn.INDEL_CHAR*my_indel_dict[j]) + rseq
rqual = ([no_qual_filler]*my_indel_dict[j]) + rqual
else:
rseq = rseq[:j] + (vqsr_cnn.INDEL_CHAR*my_indel_dict[j]) + rseq[j:]
rqual = rqual[:j] + ([no_qual_filler]*my_indel_dict[j]) + rqual[j:]
return rseq, rqual
def read_tensor_to_pileup(args, read_tensor):
tensor_map = vqsr_cnn.get_tensor_channel_map_from_args(args)
channels = vqsr_cnn.get_reference_and_read_channels(args)
pileup_tensor = np.zeros((args.window_size, channels))
for i in range(args.window_size):
for key in tensor_map:
if 'read' not in key and 'reference' not in key:
continue
if 'read' in key and K.image_data_format() == 'channels_last':
pileup_tensor[i, tensor_map[key]] = np.sum(read_tensor[:, i, tensor_map[key]]) / args.window_size
elif 'read' in key:
pileup_tensor[i, tensor_map[key]] = np.sum(read_tensor[tensor_map[key], :, i]) / args.window_size
elif 'reference' in key and K.image_data_format() == 'channels_last':
pileup_tensor[i, tensor_map[key]] = np.amax(read_tensor[:, i, tensor_map[key]])
elif 'reference' in key:
pileup_tensor[i, tensor_map[key]] = np.amax(read_tensor[tensor_map[key], :, i])
else:
raise ValueError('Error unexpected key:'+key)
return pileup_tensor
def reference_sequence_into_tensor(args, reference_seq, tensor):
ref_offset = len(set(args.input_symbols.values()))
for i,b in enumerate(reference_seq):
if i == args.window_size:
break
if b in args.input_symbols:
if K.image_data_format() == 'channels_last':
tensor[:, i, ref_offset+args.input_symbols[b]] = 1.0
else:
tensor[ref_offset+args.input_symbols[b], :, i] = 1.0
elif b in vqsr_cnn.AMBIGUITY_CODES:
ambiguous_vector = np.tile(vqsr_cnn.AMBIGUITY_CODES[b], (args.read_limit, 1))
if K.image_data_format() == 'channels_last':
tensor[:, i, ref_offset:ref_offset+4] = ambiguous_vector
else:
tensor[ref_offset:ref_offset+4, :, i] = np.transpose(ambiguous_vector)
def flag_to_array(flag):
flags = []
for i in range(vqsr_cnn.READ_FLAGS):
flags.append((flag>>i)&1)
return np.array(flags)
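# Worked example (illustrative, not from the original source): SAM flag 99
# (0b1100011) marks a paired, properly-paired read whose mate is reversed and
# which is first in the pair; flag_to_array(99) therefore begins
# [1, 1, 0, 0, 0, 1, 1, 0, ...].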
def add_flags_to_read_tensor(args, tensor, tensor_channel_map, flags):
for k in tensor_channel_map.keys():
if 'flag' in k:
flag_bit = int(k.split('_')[-1])
for read_idx in range(flags.shape[1]):
if K.image_data_format() == 'channels_last':
tensor[read_idx, :, tensor_channel_map[k]] = flags[flag_bit, read_idx]
else:
tensor[tensor_channel_map[k], read_idx, :] = flags[flag_bit, read_idx]
def add_mq_to_read_tensor(args, tensor, tensor_channel_map, mapping_qualities):
if not 'mapping_quality' in tensor_channel_map:
return
for read_idx, mq in enumerate(mapping_qualities):
if K.image_data_format() == 'channels_last':
tensor[read_idx, :, tensor_channel_map['mapping_quality']] = float(mq) / vqsr_cnn.MAPPING_QUALITY_MAX
else:
tensor[tensor_channel_map['mapping_quality'], read_idx, :] = float(mq) / vqsr_cnn.MAPPING_QUALITY_MAX
def base_quality_to_phred_array(base_quality, base, base_dict):
phred = np.zeros((4,))
exponent = float(-base_quality) / 10.0
p = 1.0-(10.0**exponent) # Convert to probability
not_p = (1.0-p) / 3.0 # Error could be any of the other 3 bases
not_base_quality = -10 * np.log10(not_p) # Back to Phred
for b in base_dict.keys():
if b == vqsr_cnn.INDEL_CHAR:
continue
elif b == base:
phred[base_dict[b]] = base_quality
else:
phred[base_dict[b]] = not_base_quality
return phred
def base_quality_to_p_hot_array(base_quality, base, base_dict):
phot = np.zeros((4,))
exponent = float(-base_quality) / 10.0
p = 1.0-(10.0**exponent)
not_p = (1.0-p)/3.0
for b in base_dict.keys():
if b == base:
phot[base_dict[b]] = p
elif b == vqsr_cnn.INDEL_CHAR:
continue
else:
phot[base_dict[b]] = not_p
return phot
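# Worked example (illustrative, not from the original source): for base
# quality 10 the exponent is -1.0, so p = 1 - 10**-1 = 0.9 and
# not_p = 0.1 / 3. base_quality_to_p_hot_array puts 0.9 at the called base
# and ~0.0333 at each of the other three; base_quality_to_phred_array
# instead stores 10 at the called base and -10 * log10(0.1 / 3) ~= 14.77
# at the others.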
def quality_from_mode(args, base_quality, base, base_dict):
if args.base_quality_mode == 'phot':
return base_quality_to_p_hot_array(base_quality, base, base_dict)
elif args.base_quality_mode == 'phred':
return base_quality_to_phred_array(base_quality, base, base_dict)
elif args.base_quality_mode == '1hot':
one_hot = np.zeros((4,))
one_hot[base_dict[base]] = 1.0
return one_hot
else:
raise ValueError('Error! Unknown base quality mode:', args.base_quality_mode)
def get_true_label(allele, variant, bed_dict, truth_vcf, stats):
'''Defines the truth status of a variant allele given a truth vcf and confident region.
Arguments:
allele: The allele to check
variant: the variant whose allele we will check
bed_dict: confident region dict defined by intervals e.g. from bed_file_to_dict()
truth_vcf: vcf of validated variants
stats: Counter dict used to keep track of the label distribution, etc.
Returns:
None if outside the confident region
Otherwise a label string:
SNP if variant is snp and in truth vcf
INDEL if variant is indel and in truth vcf
NOT_SNP if variant is snp and not in truth vcf
NOT_INDEL if variant is indel and not in truth vcf
'''
in_bed = in_bed_file(bed_dict, variant.CHROM, variant.POS)
if allele_in_vcf(allele, variant, truth_vcf) and in_bed:
class_prefix = ''
elif in_bed:
class_prefix = 'NOT_'
else:
stats['Variant outside confident bed file'] += 1
return None
if variant.is_snp:
cur_label_key = class_prefix + 'SNP'
elif variant.is_indel:
cur_label_key = class_prefix + 'INDEL'
else:
stats['Not SNP or INDEL'] += 1
return None
return cur_label_key
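# Illustrative mapping (not part of the original source): a SNP allele inside
# the confident region that is also present in the truth VCF is labelled
# 'SNP'; the same allele absent from the truth VCF is labelled 'NOT_SNP';
# outside the confident region the variant is skipped entirely (None).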
def downsample(args, cur_label_key, stats):
'''Indicates whether or not to downsample a variant.
Arguments:
args.skip_positive_class: Skip all positive examples
args.downsample_snps: fraction of SNPs to keep
args.downsample_indels: fraction of INDELs to keep
cur_label_key: truth label from get_true_label()
stats: Counter dict used to keep track of a run
Returns:
Boolean: should we downsample this variant or not.
'''
if args.skip_positive_class and cur_label_key in ['SNP', 'INDEL']:
stats['Downsampled positive examples'] += 1
return True
if args.downsample_snps < 1.0 and cur_label_key == 'SNP':
dice = np.random.rand()
if dice > args.downsample_snps:
stats['Downsampled SNPs'] += 1
return True
elif args.downsample_indels < 1.0 and cur_label_key == 'INDEL':
dice = np.random.rand()
if dice > args.downsample_indels:
stats['Downsampled INDELs'] += 1
return True
if args.downsample_not_snps < 1.0 and cur_label_key == 'NOT_SNP':
dice = np.random.rand()
if dice > args.downsample_not_snps:
stats['Downsampled NOT_SNPs'] += 1
return True
elif args.downsample_not_indels < 1.0 and cur_label_key == 'NOT_INDEL':
dice = np.random.rand()
if dice > args.downsample_not_indels:
stats['Downsampled NOT_INDELs'] += 1
return True
return False
def interval_file_to_dict(interval_file, shift1=0, skip=['@']):
    ''' Create a dict to store intervals from an interval list file.
Arguments:
interval_file: the file to load either a bed file -> shift1 should be 1
or a picard style interval_list file -> shift1 should be 0
shift1: Shift the intervals 1 position over to align with 1-indexed VCFs
skip: Comment character to ignore
Returns:
intervals: dict where keys in the dict are contig ids
values are a tuple of arrays the first array
in the tuple contains the start positions
the second array contains the end positions.
'''
intervals = {}
with open(interval_file) as f:
for line in f:
if line[0] in skip:
continue
parts = line.split()
contig = parts[0]
lower = int(parts[1])+shift1
upper = int(parts[2])+shift1
if contig not in intervals:
intervals[contig] = ([], [])
intervals[contig][0].append(lower)
intervals[contig][1].append(upper)
for k in intervals.keys():
intervals[k] = (np.array(intervals[k][0]), np.array(intervals[k][1]))
return intervals
def bed_file_to_dict(bed_file):
return interval_file_to_dict(bed_file, shift1=1)
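# Worked example (illustrative, not from the original source): a BED line
# "chr1 100 200" loaded through bed_file_to_dict (shift1=1) yields
# intervals['chr1'] == (np.array([101]), np.array([201])), aligning the
# 0-based BED coordinates with 1-based VCF positions.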
def in_bed_file(bed_dict, contig, pos):
    # Half-open interval check: start <= pos < end (upper bound exclusive)
lows = bed_dict[contig][0]
ups = bed_dict[contig][1]
return np.any((lows <= pos) & (pos < ups))
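# Worked example (illustrative, not from the original source): with the
# interval [101, 201) above, in_bed_file(bed_dict, 'chr1', 101) is True and
# in_bed_file(bed_dict, 'chr1', 201) is False, since the upper bound is
# exclusive as noted in the function.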
def allele_in_vcf(allele, variant, vcf_ram):
''' Check if variant's allele is in a VCF file.
Arguments
allele: the allele from the provided variant that we are checking
variant: the variant whose allele we are looking for
vcf_ram: the VCF we look in, must have an index (tbi, or idx)
Returns
variant if it is found otherwise None
'''
variants = vcf_ram.fetch(variant.CHROM, variant.POS-1, variant.POS)
for v in variants:
if v.CHROM == variant.CHROM and v.POS == variant.POS and allele in v.ALT:
return v
return None
def get_variant_window(args, variant):
index_offset = (args.window_size//2)
reference_start = variant.POS-(index_offset+1)
reference_end = variant.POS+index_offset
return index_offset, reference_start, reference_end
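# Worked example (illustrative, not from the original source): with
# args.window_size == 128 and a variant at POS 1000, index_offset == 64, so
# the window spans reference positions 935 through 1064
# (reference_start = 935, reference_end = 1064).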
def dna_annotation_generator(args, train_paths):
"""Data generator of DNA and annotation tensors.
    Assumes train_paths contains examples in labelled directories.
Loops over all examples sampling args.batch_size examples
uniformly from each label.
Arguments:
args: args object needed for batch_size, labels, and annotations
train_paths: array of label directories with hd5 tensors within each
    Yields:
A tuple with a dict of the input tensors
and a 1-Hot matrix (2D numpy array) of the labels.
"""
per_batch_per_label = (args.batch_size // len(args.labels))
tensor_counts = Counter()
tensors = {}
if args.window_size > 0:
channel_map = vqsr_cnn.get_tensor_channel_map_from_args(args)
tensor = np.zeros((args.batch_size, args.window_size, len(channel_map)))
annotation_data = np.zeros((args.batch_size, len(args.annotations)))
label_matrix = np.zeros((args.batch_size, len(args.labels)))
for tp in train_paths:
label_key = os.path.basename(tp)
if label_key not in args.labels:
print('Skipping label directory:', label_key, ' which is not in args label set:', args.labels.keys())
continue
label = args.labels[label_key]
tensors[label] = [os.path.join(tp, t) for t in os.listdir(tp)
if os.path.splitext(t)[1] == vqsr_cnn.TENSOR_SUFFIX]
tensor_counts[label] = 0
print('Found ', len(tensors[label]), 'examples of label:', label, 'in:', tp)
while True:
cur_example = 0
for label in tensors.keys():
for i in range(per_batch_per_label):
tensor_path = tensors[label][tensor_counts[label]]
label_matrix[cur_example, label] = 1.0
with h5py.File(tensor_path,'r') as hf:
annotation_data[cur_example,:] = np.array(hf.get(args.annotation_set))
if args.window_size > 0:
tensor[cur_example,:,:] = np.array(hf.get(args.tensor_name))
tensor_counts[label] += 1
if tensor_counts[label] == len(tensors[label]):
np.random.shuffle(tensors[label])
print('\nGenerator shuffled & looped over:', tensor_counts[label],
'examples of label:',label, '\nLast tensor was:', tensor_path)
tensor_counts[label] = 0
cur_example += 1
if cur_example == args.batch_size:
break
if args.window_size > 0:
yield ({args.tensor_name:tensor, args.annotation_set:annotation_data}, label_matrix)
else:
yield (annotation_data, label_matrix)
def tensor_generator_from_label_dirs_and_args(args, train_paths, with_positions=False):
"""Data generator of tensors with reads, and annotations.
    Assumes train_paths contains examples in labelled directories.
Loops over all examples sampling args.batch_size examples
uniformly from each label.
Arguments:
args: args object needed for batch_size, labels, and annotations
train_paths: array of label directories with hd5 tensors within each
with_positions: boolean if True will include a position string
(i.e. "1_1234_0" for tensor from contig one base 1234 and first allele)
as the last element in each tensor tuple.
    Yields:
A tuple with a dict of the input tensors
and a 1-Hot matrix (2D numpy array) of the labels.
"""
batch = {}
tensors = {}
tensor_counts = Counter()
per_batch_per_label = (args.batch_size // len(args.labels) )
tm = vqsr_cnn.get_tensor_channel_map_from_args(args)
if tm:
tensor_shape = vqsr_cnn.tensor_shape_from_args(args)
batch[args.tensor_name] = np.zeros(((args.batch_size,)+tensor_shape))
if vqsr_cnn.annotations_from_args(args):
batch[args.annotation_set] = np.zeros((args.batch_size, len(args.annotations)))
if with_positions:
positions = []
label_matrix = np.zeros((args.batch_size, len(args.labels)))
for tp in train_paths:
label_key = os.path.basename(tp)
if label_key not in args.labels:
print('Skipping label directory:', label_key, ' which is not in args label set:', args.labels.keys())
continue
label = args.labels[label_key]
tensors[label] = [os.path.join(tp, t) for t in os.listdir(tp)
if os.path.splitext(t)[1] == vqsr_cnn.TENSOR_SUFFIX]
tensor_counts[label] = 0
print('Found ', len(tensors[label]), 'examples of label:', label, 'in:', tp)
while True:
cur_example = 0
for label in tensors.keys():
for i in range(per_batch_per_label):
tensor_path = tensors[label][tensor_counts[label]]
with h5py.File(tensor_path, 'r') as hf:
for key in batch.keys():
batch[key][cur_example] = np.array(hf.get(key))
label_matrix[cur_example, label] = 1.0
tensor_counts[label] += 1
if tensor_counts[label] == len(tensors[label]):
np.random.shuffle(tensors[label])
print('\nGenerator looped over:', tensor_counts[label],
'examples of label:', label, '\nShuffled them. Last tensor was:', tensor_path)
tensor_counts[label] = 0
if with_positions:
positions.append(position_string_from_tensor_name(tensor_path))
cur_example += 1
if cur_example == args.batch_size:
break
if with_positions:
yield (batch, label_matrix, positions)
positions = []
else:
yield (batch, label_matrix)
label_matrix = np.zeros((args.batch_size, len(args.labels)))
if with_positions and tm:
tensor_shape = vqsr_cnn.tensor_shape_from_args(args)
batch[args.tensor_name] = np.zeros(((args.batch_size,)+tensor_shape))
if with_positions and vqsr_cnn.annotations_from_args(args):
batch[args.annotation_set] = np.zeros((args.batch_size, len(args.annotations)))
def load_dna_annotations_positions_from_class_dirs(args, train_paths,
per_class_max=4000, include_dna=True, include_annotations=True):
count = 0
annotation_data = []
reference_data = []
labels_data = []
positions = []
for tp in train_paths:
label_key = os.path.basename(tp)
if label_key not in args.labels:
print('Skipping label directory:', label_key, ' which is not in args label set:', args.labels.keys())
continue
label = args.labels[label_key]
imgs = os.listdir(tp)
count += 1
print(count, " dir out of:", len(train_paths), tp, "has:", len(imgs))
this_t = 0
for t in imgs:
this_t += 1
if this_t > per_class_max:
print('Per class max reached. bailing at', this_t)
break
fn, file_extension = os.path.splitext(t)
if not file_extension.lower() == vqsr_cnn.TENSOR_SUFFIX:
continue
with h5py.File(tp+'/'+t, 'r') as hf:
if include_annotations:
annotation_data.append(np.array(hf.get(args.annotation_set)))
if include_dna:
reference_data.append(np.array(hf.get(args.tensor_name)))
y_vector = np.zeros(len(args.labels)) # One hot Y vector of size labels, correct label is 1 others are 0
y_vector[label] = 1.0
labels_data.append(y_vector)
positions.append(position_string_from_tensor_name(t))
if include_dna and include_annotations:
return np.asarray(reference_data), np.asarray(annotation_data), np.asarray(labels_data), np.asarray(positions)
elif include_annotations:
return np.asarray(annotation_data), np.asarray(labels_data), np.asarray(positions)
elif include_dna:
return np.asarray(reference_data), np.asarray(labels_data), np.asarray(positions)
def load_tensors_and_annotations_from_class_dirs(args, train_paths, per_class_max=2500, position_dict=None):
annotations = []
positions = []
tensors = []
labels = []
count = 0
for tp in train_paths:
label_key = os.path.basename(tp)
if label_key not in args.labels:
print('Skipping label directory:', label_key, ' which is not in args label set:', args.labels.keys())
continue
label = args.labels[label_key]
imgs = os.listdir(tp)
count += 1
this_t = 0
for t in imgs:
if this_t > per_class_max:
print('Per class max reached. bailing at', this_t)
break
fn, file_extension = os.path.splitext(t)
if not file_extension.lower() == vqsr_cnn.TENSOR_SUFFIX:
continue
with h5py.File(tp+'/'+t, 'r') as hf:
tensors.append(np.array(hf.get(args.tensor_name)))
annotations.append(np.array(hf.get(args.annotation_set)))
y_vector = np.zeros(len(args.labels)) # One hot Y vector of size labels, correct label is 1 all others are 0
y_vector[label] = 1.0
labels.append(y_vector)
positions.append(position_string_from_tensor_name(t))
this_t += 1
print(count, " dir out of:", len(train_paths), tp, "has:", len(imgs), 'Loaded:', this_t)
return np.asarray(tensors), np.asarray(annotations), np.asarray(labels), np.asarray(positions)
def position_string_from_tensor_name(tensor_name):
'''Genomic position as underscore delineated string from a filename.
Includes an allele index if the filename includes _allele_
This is ugly, we need file names ending with genomic position
(e.g. my_tensor-12_1234.h5 returns 12_1234 and a_tensor_allele_1-8_128.hd5 returns 8_128_1)
Arguments:
tensor_name: the filename to parse
Returns:
Genomic position string Contig_Position or Contig_Position_AlleleIndex
'''
slash_split = tensor_name.split('/')
dash_split = slash_split[-1].split('-')
gsplit = dash_split[0].split('_')
gpos = dash_split[-1]
chrom = gpos.split('_')[0]
pos = os.path.splitext(gpos.split('_')[1])[0]
pos_str = chrom + '_' + pos
for i,p in enumerate(gsplit):
if p == 'allele':
pos_str += '_'+str(gsplit[i+1])
return pos_str
def get_path_to_train_valid_or_test(path, valid_ratio=0.1, test_ratio=0.2, valid_contig='-19_', test_contig='-20_'):
dice = np.random.rand()
if dice < valid_ratio or valid_contig in path:
return os.path.join(path, 'valid/')
elif dice < valid_ratio+test_ratio or test_contig in path:
return os.path.join(path, 'test/')
else:
return os.path.join(path, 'train/')
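# Illustrative behaviour (not part of the original source): paths containing
# '-19_' always go to valid/; paths containing '-20_' go to test/ unless the
# random draw already sent them to valid/; all other paths are split
# randomly, roughly 10% valid, 20% test, 70% train.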
def get_train_valid_test_paths(args):
train_dir = args.data_dir + 'train/'
valid_dir = args.data_dir + 'valid/'
test_dir = args.data_dir + 'test/'
train_paths = [train_dir + tp for tp in sorted(os.listdir(train_dir)) if os.path.isdir(train_dir + tp)]
valid_paths = [valid_dir + vp for vp in sorted(os.listdir(valid_dir)) if os.path.isdir(valid_dir + vp)]
test_paths = [test_dir + vp for vp in sorted(os.listdir(test_dir)) if os.path.isdir(test_dir + vp)]
assert(len(train_paths) == len(valid_paths) == len(test_paths))
return train_paths, valid_paths, test_paths
def plain_name(full_name):
name = os.path.basename(full_name)
return name.split('.')[0]
# Back to the top!
if "__main__" == __name__:
run() | bsd-3-clause | 4,743,806,703,012,307,000 | 39.327541 | 120 | 0.607829 | false |
felipenaselva/repo.felipe | plugin.video.exodus/resources/lib/indexers/movies.py | 2 | 41974 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import metacache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
import os,sys,re,json,urllib,urlparse,base64,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
control.moderator()
class movies:
def __init__(self):
self.list = []
self.imdb_link = 'http://www.imdb.com'
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.trakt_user = control.setting('trakt.user').strip()
self.imdb_user = control.setting('imdb.user').replace('ur', '')
self.lang = control.apiLanguage()['trakt']
self.search_link = 'http://api-v2launch.trakt.tv/search?type=movie&limit=100&query='
self.trakt_info_link = 'http://api-v2launch.trakt.tv/movies/%s?extended=images'
self.trakt_lang_link = 'http://api-v2launch.trakt.tv/movies/%s/translations/%s'
self.imdb_info_link = 'http://www.omdbapi.com/?i=%s&plot=full&r=json'
self.persons_link = 'http://www.imdb.com/search/name?count=100&name='
self.personlist_link = 'http://www.imdb.com/search/name?count=100&gender=male,female'
self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=40&start=1'
self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&sort=num_votes,desc&count=40&start=1'
self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=40&start=1'
self.person_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&role=%s&sort=year,desc&count=40&start=1'
self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&release_date=date[730],date[30]&genres=%s&sort=moviemeter,asc&count=40&start=1'
self.language_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&languages=%s&sort=moviemeter,asc&count=40&start=1'
self.certification_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&production_status=released&certificates=us:%s&sort=moviemeter,asc&count=40&start=1'
self.year_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&production_status=released&year=%s,%s&sort=moviemeter,asc&count=40&start=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&production_status=released&sort=boxoffice_gross_us,desc&count=40&start=1'
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&production_status=released&groups=oscar_best_picture_winners&sort=year,desc&count=40&start=1'
self.theaters_link = 'http://www.imdb.com/search/title?title_type=feature&languages=en&num_votes=1000,&release_date=date[365],date[0]&sort=release_date_us,desc&count=40&start=1'
self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?limit=40&page=1'
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/me/collection/movies'
self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/me/watchlist/movies'
self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=40'
self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/movies?limit=40&page=1'
self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdblist2_link = 'http://www.imdb.com/list/%s/?view=detail&sort=created:desc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=alpha,asc' % self.imdb_user
self.imdbwatchlist2_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=date_added,desc' % self.imdb_user
def get(self, url, idx=True):
try:
try: url = getattr(self, url + '_link')
except: pass
try: u = urlparse.urlparse(url).netloc.lower()
except: pass
if u in self.trakt_link and '/users/' in url:
try:
if url == self.trakthistory_link: raise Exception()
if not '/users/me/' in url: raise Exception()
if trakt.getActivity() > cache.timeout(self.trakt_list, url, self.trakt_user): raise Exception()
self.list = cache.get(self.trakt_list, 720, url, self.trakt_user)
except:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
if '/users/me/' in url and not '/watchlist/' in url:
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['title'].lower()))
if idx == True: self.worker()
elif u in self.trakt_link and self.search_link in url:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
elif u in self.trakt_link:
self.list = cache.get(self.trakt_list, 24, url, self.trakt_user)
if idx == True: self.worker()
elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
self.list = cache.get(self.imdb_list, 0, url)
if idx == True: self.worker()
elif u in self.imdb_link:
self.list = cache.get(self.imdb_list, 24, url)
if idx == True: self.worker()
if idx == True: self.movieDirectory(self.list)
return self.list
except:
pass
def widget(self):
setting = control.setting('movie.widget')
if setting == '2':
self.get(self.trending_link)
elif setting == '3':
self.get(self.popular_link)
elif setting == '4':
self.get(self.theaters_link)
else:
self.get(self.featured_link)
def search(self):
try:
control.idle()
t = control.lang(32010).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
q = k.getText() if k.isConfirmed() else None
if (q == None or q == ''): return
url = self.search_link + urllib.quote_plus(q)
url = '%s?action=movies&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
except:
return
def person(self):
try:
control.idle()
t = control.lang(32010).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
q = k.getText() if k.isConfirmed() else None
if (q == None or q == ''): return
url = self.persons_link + urllib.quote_plus(q)
url = '%s?action=moviePersons&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
except:
return
def genres(self):
genres = [
('Action', 'action'),
('Adventure', 'adventure'),
('Animation', 'animation'),
('Biography', 'biography'),
('Comedy', 'comedy'),
('Crime', 'crime'),
('Drama', 'drama'),
('Family', 'family'),
('Fantasy', 'fantasy'),
('History', 'history'),
('Horror', 'horror'),
            ('Music', 'music'),
('Musical', 'musical'),
('Mystery', 'mystery'),
('Romance', 'romance'),
('Science Fiction', 'sci_fi'),
('Sport', 'sport'),
('Thriller', 'thriller'),
('War', 'war'),
('Western', 'western')
]
for i in genres: self.list.append({'name': cleangenre.lang(i[0], self.lang), 'url': self.genre_link % i[1], 'image': 'genres.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def languages(self):
languages = [
('Arabic', 'ar'),
('Bulgarian', 'bg'),
('Chinese', 'zh'),
('Croatian', 'hr'),
('Dutch', 'nl'),
('English', 'en'),
('Finnish', 'fi'),
('French', 'fr'),
('German', 'de'),
('Greek', 'el'),
('Hebrew', 'he'),
            ('Hindi', 'hi'),
('Hungarian', 'hu'),
('Icelandic', 'is'),
('Italian', 'it'),
('Japanese', 'ja'),
('Korean', 'ko'),
('Norwegian', 'no'),
('Persian', 'fa'),
('Polish', 'pl'),
('Portuguese', 'pt'),
('Punjabi', 'pa'),
('Romanian', 'ro'),
('Russian', 'ru'),
('Spanish', 'es'),
('Swedish', 'sv'),
('Turkish', 'tr'),
('Ukrainian', 'uk')
]
for i in languages: self.list.append({'name': str(i[0]), 'url': self.language_link % i[1], 'image': 'languages.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def certifications(self):
certificates = ['G', 'PG', 'PG-13', 'R', 'NC-17']
for i in certificates: self.list.append({'name': str(i), 'url': self.certification_link % str(i).replace('-', '_').lower(), 'image': 'certificates.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def years(self):
year = (self.datetime.strftime('%Y'))
        for i in range(int(year), int(year)-50, -1): self.list.append({'name': str(i), 'url': self.year_link % (str(i), str(i)), 'image': 'years.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def persons(self, url):
if url == None:
self.list = cache.get(self.imdb_person_list, 24, self.personlist_link)
else:
self.list = cache.get(self.imdb_person_list, 0, url)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if self.imdb_user == '': raise Exception()
userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
self.addDirectory(self.list, queue=True)
return self.list
def trakt_list(self, url, user):
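        # Fetch a Trakt endpoint with extended info/images, unwrap nested 'movie'
        # entries, build a capped next-page URL (pages 1-4 only), and normalise every
        # item into the add-on's common movie-metadata dict.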
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
result = json.loads(result)
items = []
for i in result:
try: items.append(i['movie'])
except: pass
if len(items) == 0:
items = result
except:
return
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
p = str(int(q['page']) + 1)
if p == '5': raise Exception()
q.update({'page': p})
q = (urllib.urlencode(q)).replace('%2C', ',')
next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = item['ids']['imdb']
if imdb == None or imdb == '': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
poster = '0'
try: poster = item['images']['poster']['medium']
except: pass
if poster == None or not '/posters/' in poster: poster = '0'
poster = poster.rsplit('?', 1)[0]
poster = poster.encode('utf-8')
banner = poster
try: banner = item['images']['banner']['full']
except: pass
if banner == None or not '/banners/' in banner: banner = '0'
banner = banner.rsplit('?', 1)[0]
banner = banner.encode('utf-8')
fanart = '0'
try: fanart = item['images']['fanart']['full']
except: pass
if fanart == None or not '/fanarts/' in fanart: fanart = '0'
fanart = fanart.rsplit('?', 1)[0]
fanart = fanart.encode('utf-8')
try: premiered = item['released']
except: premiered = '0'
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try: genre = item['genres']
except: genre = '0'
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['runtime'])
except: duration = '0'
if duration == None: duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
votes = votes.encode('utf-8')
try: mpaa = item['certification']
except: mpaa = '0'
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
try: plot = item['overview']
except: plot = '0'
if plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'banner': banner, 'fanart': fanart, 'next': next})
except:
pass
return self.list
def trakt_user_list(self, url, user):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
pass
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def imdb_list(self, url):
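        # Expand relative date[N] placeholders to absolute dates, resolve watchlist
        # URLs to their underlying list IDs, then scrape the IMDb lister markup into
        # movie metadata dicts, including a next-page URL when pagination is present.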
try:
for i in re.findall('date\[(\d+)\]', url):
url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
def imdb_watchlist_id(url):
return client.parseDOM(client.request(url).decode('iso-8859-1').encode('utf-8'), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
if url == self.imdbwatchlist_link:
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist_link % url
elif url == self.imdbwatchlist2_link:
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist2_link % url
result = client.request(url)
result = result.replace('\n','')
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
except:
return
try:
next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})
if len(next) == 0:
next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
next = [i[0] for i in next if 'Next' in i[1]]
next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
next = client.replaceHTMLCodes(next)
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = client.parseDOM(item, 'a')[1]
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
year = re.findall('(\d{4})', year[0])[0]
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = client.parseDOM(item, 'a', ret='href')[0]
imdb = re.findall('(tt\d*)', imdb)[0]
imdb = imdb.encode('utf-8')
try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
except: poster = '0'
poster = re.sub('(?:_SX\d+?|)(?:_SY\d+?|)(?:_UX\d+?|)_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
except: genre = '0'
genre = ' / '.join([i.strip() for i in genre.split(',')])
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
except: duration = '0'
duration = duration.encode('utf-8')
rating = '0'
try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
except: pass
try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
except: rating = '0'
try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
except: pass
if rating == '' or rating == '-': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
except: votes = '0'
try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
except: mpaa = '0'
if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
mpaa = mpaa.replace('_', '-')
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
except: director = '0'
director = client.parseDOM(director, 'a')
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
                try: cast = re.findall('Star(?:s|):(.+?)(?:\||</div>)', item)[0]
except: cast = '0'
cast = client.replaceHTMLCodes(cast)
cast = cast.encode('utf-8')
cast = client.parseDOM(cast, 'a')
if cast == []: cast = '0'
plot = '0'
try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
except: pass
try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
except: pass
plot = plot.rsplit('<span>', 1)[0].strip()
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'banner': '0', 'fanart': '0', 'next': next})
except:
pass
return self.list
def imdb_person_list(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
except:
return
for item in items:
try:
name = client.parseDOM(item, 'a', ret='title')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = re.findall('(nm\d*)', url, re.I)[0]
url = self.person_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = client.parseDOM(item, 'img', ret='src')[0]
if not ('._SX' in image or '._SY' in image): raise Exception()
image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
image = client.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
def imdb_user_list(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
except:
pass
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def worker(self):
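        # Enrich every list entry with full metadata: mark items as uncached, pull
        # whatever the metacache already has, then resolve the rest via super_info in
        # batches of 40 threads, persisting new results back to the metacache.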
self.meta = []
total = len(self.list)
for i in range(0, total): self.list[i].update({'metacache': False})
self.list = metacache.fetch(self.list, self.lang)
for r in range(0, total, 40):
threads = []
for i in range(r, r+40):
                if i < total: threads.append(workers.Thread(self.super_info, i))
[i.start() for i in threads]
[i.join() for i in threads]
if len(self.meta) > 0: metacache.insert(self.meta)
self.list = [i for i in self.list if not i['imdb'] == '0']
def super_info(self, i):
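        # Resolve full metadata for list item i (skipped when the metacache already
        # has it): title, plot and credits come from the JSON imdb_info_link
        # endpoint, artwork from Trakt, with a translated title/plot fetched when the
        # UI language is not 'en'.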
try:
if self.list[i]['metacache'] == True: raise Exception()
imdb = self.list[i]['imdb']
url = self.imdb_info_link % imdb
item = client.request(url, timeout='10')
item = json.loads(item)
title = item['Title']
title = title.encode('utf-8')
if not title == '0': self.list[i].update({'title': title})
year = item['Year']
year = year.encode('utf-8')
if not year == '0': self.list[i].update({'year': year})
imdb = item['imdbID']
if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
imdb = imdb.encode('utf-8')
if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})
premiered = item['Released']
if premiered == None or premiered == '' or premiered == 'N/A': premiered = '0'
premiered = re.findall('(\d*) (.+?) (\d*)', premiered)
try: premiered = '%s-%s-%s' % (premiered[0][2], {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}[premiered[0][1]], premiered[0][0])
except: premiered = '0'
premiered = premiered.encode('utf-8')
if not premiered == '0': self.list[i].update({'premiered': premiered})
genre = item['Genre']
if genre == None or genre == '' or genre == 'N/A': genre = '0'
genre = genre.replace(', ', ' / ')
genre = genre.encode('utf-8')
if not genre == '0': self.list[i].update({'genre': genre})
duration = item['Runtime']
if duration == None or duration == '' or duration == 'N/A': duration = '0'
duration = re.sub('[^0-9]', '', str(duration))
duration = duration.encode('utf-8')
if not duration == '0': self.list[i].update({'duration': duration})
rating = item['imdbRating']
if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
if not rating == '0': self.list[i].update({'rating': rating})
votes = item['imdbVotes']
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None or votes == '' or votes == 'N/A': votes = '0'
votes = votes.encode('utf-8')
if not votes == '0': self.list[i].update({'votes': votes})
mpaa = item['Rated']
if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
mpaa = mpaa.encode('utf-8')
if not mpaa == '0': self.list[i].update({'mpaa': mpaa})
director = item['Director']
if director == None or director == '' or director == 'N/A': director = '0'
director = director.replace(', ', ' / ')
director = re.sub(r'\(.*?\)', '', director)
director = ' '.join(director.split())
director = director.encode('utf-8')
if not director == '0': self.list[i].update({'director': director})
writer = item['Writer']
if writer == None or writer == '' or writer == 'N/A': writer = '0'
writer = writer.replace(', ', ' / ')
writer = re.sub(r'\(.*?\)', '', writer)
writer = ' '.join(writer.split())
writer = writer.encode('utf-8')
if not writer == '0': self.list[i].update({'writer': writer})
cast = item['Actors']
if cast == None or cast == '' or cast == 'N/A': cast = '0'
cast = [x.strip() for x in cast.split(',') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
if cast == []: cast = '0'
if not cast == '0': self.list[i].update({'cast': cast})
plot = item['Plot']
if plot == None or plot == '' or plot == 'N/A': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
if not plot == '0': self.list[i].update({'plot': plot})
studio = self.list[i]['studio']
url = self.trakt_info_link % imdb
item = trakt.getTrakt(url)
item = json.loads(item)
poster = '0'
try: poster = item['images']['poster']['medium']
except: pass
if poster == None or not '/posters/' in poster: poster = '0'
poster = poster.rsplit('?', 1)[0]
if poster == '0': poster = self.list[i]['poster']
poster = poster.encode('utf-8')
if not poster == '0': self.list[i].update({'poster': poster})
banner = '0'
try: banner = item['images']['banner']['full']
except: pass
if banner == None or not '/banners/' in banner: banner = '0'
banner = banner.rsplit('?', 1)[0]
banner = banner.encode('utf-8')
if not banner == '0': self.list[i].update({'banner': banner})
fanart = '0'
try: fanart = item['images']['fanart']['full']
except: pass
if fanart == None or not '/fanarts/' in fanart: fanart = '0'
fanart = fanart.rsplit('?', 1)[0]
            if fanart == '0': fanart = self.list[i]['fanart']
fanart = fanart.encode('utf-8')
if not fanart == '0': self.list[i].update({'fanart': fanart})
if not self.lang == 'en':
url = self.trakt_lang_link % (imdb, self.lang)
item = trakt.getTrakt(url)
item = json.loads(item)[0]
t = item['title']
if not (t == None or t == ''): title = t
try: title = title.encode('utf-8')
except: pass
if not title == '0': self.list[i].update({'title': title})
t = item['overview']
if not (t == None or t == ''): plot = t
try: plot = plot.encode('utf-8')
except: pass
if not plot == '0': self.list[i].update({'plot': plot})
self.meta.append({'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'lang': self.lang, 'item': {'title': title, 'year': year, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'poster': poster, 'banner': banner, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot}})
except:
pass
def movieDirectory(self, items):
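        # Render the movie list as Kodi list items: artwork and fanart, watched/
        # unwatched context menus driven by playcount indicators (the module-level
        # 'action' flag is expected to be provided by the plugin router), a trailer
        # link, and a trailing next-page entry when pagination info is present.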
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isEstuary = True if 'estuary' in control.skin else False
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
indicators = playcount.getMovieIndicators(refresh=True) if action == 'movies' else playcount.getMovieIndicators()
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
nextMenu = control.lang(32053).encode('utf-8')
for i in items:
try:
label = '%s (%s)' % (i['title'], i['year'])
imdb, title, year = i['imdb'], i['originaltitle'], i['year']
sysname = urllib.quote_plus('%s (%s)' % (title, year))
systitle = urllib.quote_plus(title)
poster, banner, fanart = i['poster'], i['banner'], i['fanart']
if banner == '0' and not fanart == '0': banner = fanart
elif banner == '0' and not poster == '0': banner = poster
if poster == '0': poster = addonPoster
if banner == '0': banner = addonBanner
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'movie'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
#meta.update({'trailer': 'plugin://script.extendedinfo/?info=playtrailer&&id=%s' % imdb})
if i['duration'] == '0': meta.update({'duration': '120'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
if isEstuary == True:
try: del meta['cast']
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
path = '%s?action=play&title=%s&year=%s&imdb=%s' % (sysaddon, systitle, year, imdb)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
try:
overlay = int(playcount.getMovieOverlay(indicators, imdb))
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb)))
meta.update({'playcount': 0, 'overlay': 6})
except:
pass
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
item.setArt({'icon': poster, 'thumb': poster, 'poster': poster, 'banner': banner})
if settingFanart == 'true' and not fanart == '0':
item.setProperty('Fanart_Image', fanart)
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
except:
pass
try:
url = items[0]['next']
if url == '': raise Exception()
icon = control.addonNext()
url = '%s?action=moviePage&url=%s' % (sysaddon, urllib.quote_plus(url))
item = control.item(label=nextMenu)
item.setArt({'icon': icon, 'thumb': icon, 'poster': icon, 'banner': icon})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'movies')
#control.do_block_check(False)
control.directory(syshandle, cacheToDisc=True)
views.setView('movies', {'skin.confluence': 500})
def addDirectory(self, items, queue=False):
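        # Render a simple navigation directory (genres, years, user lists, ...) with
        # an optional queue context-menu entry on each item.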
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath()
queueMenu = control.lang(32065).encode('utf-8')
for i in items:
try:
name = i['name']
if i['image'].startswith('http://'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
if queue == True:
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
#control.do_block_check(False)
control.directory(syshandle, cacheToDisc=True)
| gpl-2.0 | 7,360,804,568,550,957,000 | 41.100301 | 420 | 0.523324 | false |
alsrgv/tensorflow | tensorflow/python/kernel_tests/substr_op_test.py | 27 | 20553 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Substr op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SubstrOpTest(test.TestCase, parameterized.TestCase):
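  # Each test is parameterized over the position/length index dtype (int32/int64)
  # and the substring unit ("BYTE" vs "UTF8_CHAR"), so every case is exercised for
  # both byte offsets and UTF-8 character offsets.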
@parameterized.parameters(
(np.int32, 1, "BYTE"),
(np.int64, 1, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 1, "UTF8_CHAR"),
(np.int64, 1, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testScalarString(self, dtype, pos, unit):
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"He\xc3\xc3\U0001f604".encode("utf-8"),
}[unit]
expected_value = {
"BYTE": b"ell",
"UTF8_CHAR": u"e\xc3\xc3".encode("utf-8"),
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testScalarString_EdgeCases(self, dtype, unit):
# Empty string
test_string = {
"BYTE": b"",
"UTF8_CHAR": u"".encode("utf-8"),
}[unit]
expected_value = b""
position = np.array(0, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
# Full string
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(0, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, test_string)
# Full string (Negative)
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(-5, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, test_string)
# Length is larger in magnitude than a negative position
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
expected_string = {
"BYTE": b"ello",
"UTF8_CHAR": u"\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(-4, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_string)
@parameterized.parameters(
(np.int32, 1, "BYTE"),
(np.int64, 1, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 1, "UTF8_CHAR"),
(np.int64, 1, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testVectorStrings(self, dtype, pos, unit):
test_string = {
"BYTE": [b"Hello", b"World"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"H\xc3llo",
u"W\U0001f604rld"]],
}[unit]
expected_value = {
"BYTE": [b"ell", b"orl"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"\xc3ll", u"\U0001f604rl"]],
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testMatrixStrings(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"He\xc3\xc3o",
u"W\U0001f604rld",
u"d\xfcd\xea"]]],
}[unit]
position = np.array(1, dtype)
length = np.array(4, dtype)
expected_value = {
"BYTE": [[b"en", b"leve", b"welv"], [b"hirt", b"ourt", b"ifte"],
[b"ixte", b"even", b"ight"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227n",
u"\u053c\u025bv\u025b",
u"w\u0c1dlv"]],
[x.encode("utf-8") for x in [u"e\xc3\xc3o",
u"\U0001f604rld",
u"\xfcd\xea"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
position = np.array(-3, dtype)
length = np.array(2, dtype)
expected_value = {
"BYTE": [[b"te", b"ve", b"lv"], [b"ee", b"ee", b"ee"],
[b"ee", b"ee", b"ee"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227",
u"v\u025b", u"lv"]],
[x.encode("utf-8") for x in [u"\xc3\xc3", u"rl",
u"\xfcd"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testElementWisePosLen(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"He\xc3\xc3o",
u"W\U0001f604rld",
u"d\xfcd\xea"]],
[x.encode("utf-8") for x in [u"sixt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]]],
}[unit]
position = np.array([[1, -4, 3], [1, 2, -4], [-5, 2, 3]], dtype)
length = np.array([[2, 2, 4], [4, 3, 2], [5, 5, 5]], dtype)
expected_value = {
"BYTE": [[b"en", b"ev", b"lve"], [b"hirt", b"urt", b"te"],
[b"xteen", b"vente", b"hteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227n",
u"\u025bv",
u"lv\u025b"]],
[x.encode("utf-8") for x in [u"e\xc3\xc3o",
u"rld",
u"d\xfc"]],
[x.encode("utf-8") for x in [u"xt\xea\xean",
u"\U00010299ente",
u"h\x86een"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testBroadcast(self, dtype, unit):
# Broadcast pos/len onto input string
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"],
[b"nineteen", b"twenty", b"twentyone"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
[x.encode("utf-8") for x in [u"s\xcdxt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]],
[x.encode("utf-8") for x in [u"nineteen",
u"twenty",
u"twentyone"]]],
}[unit]
position = np.array([1, -4, 3], dtype)
length = np.array([1, 2, 3], dtype)
expected_value = {
"BYTE": [[b"e", b"ev", b"lve"], [b"h", b"te", b"tee"],
[b"i", b"te", b"hte"], [b"i", b"en", b"nty"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227",
u"\u025bv", u"lv\u025b"]],
[x.encode("utf-8") for x in [u"h", u"t\xea", u"tee"]],
[x.encode("utf-8") for x in [u"\xcd", u"te", u"h\x86e"]],
[x.encode("utf-8") for x in [u"i", u"en", u"nty"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
# Broadcast input string onto pos/len
test_string = {
"BYTE": [b"thirteen", b"fourteen", b"fifteen"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
}[unit]
position = np.array([[1, -2, 3], [-3, 2, 1], [5, 5, -5]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
expected_value = {
"BYTE": [[b"hir", b"en", b"t"], [b"e", b"ur", b"ift"],
[b"ee", b"ee", b"ft"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"h\xcdr", u"\xean", u"t"]],
[x.encode("utf-8") for x in [u"\xea", u"ur",
u"\xcd\ua09ct"]],
[x.encode("utf-8") for x in [u"\xea\xea", u"\xea\xea",
u"\ua09ct"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
# Test 1D broadcast
test_string = {
"BYTE": b"thirteen",
"UTF8_CHAR": u"th\xcdrt\xea\xean".encode("utf-8"),
}[unit]
position = np.array([1, -4, 7], dtype)
length = np.array([3, 2, 1], dtype)
expected_value = {
"BYTE": [b"hir", b"te", b"n"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"h\xcdr", u"t\xea", u"n"]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = self.evaluate(substr_op)
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testBadBroadcast(self, dtype, unit):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([1, 2, -3, 4], dtype)
length = np.array([1, 2, 3, 4], dtype)
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length, unit=unit)
@parameterized.parameters(
(np.int32, 6, "BYTE"),
(np.int64, 6, "BYTE"),
(np.int32, -6, "BYTE"),
(np.int64, -6, "BYTE"),
(np.int32, 6, "UTF8_CHAR"),
(np.int64, 6, "UTF8_CHAR"),
(np.int32, -6, "UTF8_CHAR"),
(np.int64, -6, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_Scalar(self, dtype, pos, unit):
# Scalar/Scalar
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, 4, "BYTE"),
(np.int64, 4, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 4, "UTF8_CHAR"),
(np.int64, 4, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_VectorScalar(self, dtype, pos, unit):
# Vector/Scalar
test_string = {
"BYTE": [b"good", b"good", b"bad", b"good"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"g\xc3\xc3d", u"b\xc3d",
u"g\xc3\xc3d"]],
}[unit]
position = np.array(pos, dtype)
length = np.array(1, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_MatrixMatrix(self, dtype, unit):
# Matrix/Matrix
test_string = {
"BYTE": [[b"good", b"good", b"good"], [b"good", b"good", b"bad"],
[b"good", b"good", b"good"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"b\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]]],
}[unit]
position = np.array([[1, 2, 3], [1, 2, 4], [1, 2, 3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
# Matrix/Matrix (with negative)
position = np.array([[1, 2, -3], [1, 2, -4], [1, 2, -3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testOutOfRangeError_Broadcast(self, dtype, unit):
# Broadcast
test_string = {
"BYTE": [[b"good", b"good", b"good"], [b"good", b"good", b"bad"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"b\xc3d"]]],
}[unit]
position = np.array([1, 2, 4], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
# Broadcast (with negative)
position = np.array([-1, -2, -4], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(substr_op)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
@test_util.run_deprecated_v1
def testMismatchPosLenShapes(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
[x.encode("utf-8") for x in [u"s\xcdxt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]]],
}[unit]
position = np.array([[1, 2, 3]], dtype)
length = np.array([2, 3, 4], dtype)
# Should fail: position/length have different rank
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length)
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4]], dtype)
# Should fail: position/length have different dimensionality
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length)
@test_util.run_deprecated_v1
def testWrongDtype(self):
with self.cached_session():
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3.0, 1)
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3, 1.0)
@test_util.run_deprecated_v1
def testInvalidUnit(self):
with self.cached_session():
with self.assertRaises(ValueError):
string_ops.substr(b"test", 3, 1, unit="UTF8")
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,213,583,889,679,557,000 | 40.354125 | 80 | 0.503576 | false |
sekikn/incubator-airflow | tests/providers/google/cloud/operators/test_video_intelligence_system.py | 10 | 2040 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from airflow.providers.google.cloud.example_dags.example_video_intelligence import GCP_BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_VIDEO_SOURCE_URL = "https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4"
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudVideoIntelligenceExampleDagsTest(GoogleSystemTest):
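    # System test: provision a GCS bucket, stream a sample video into it, run the
    # example video-intelligence DAG against it, then tear the bucket down.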
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
self.execute_with_ctx(
cmd=["bash", "-c", f"curl {GCP_VIDEO_SOURCE_URL} | gsutil cp - gs://{GCP_BUCKET_NAME}/video.mp4"],
key=GCP_GCS_KEY,
)
super().setUp()
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
def test_example_dag(self):
self.run_dag('example_gcp_video_intelligence', CLOUD_DAG_FOLDER)
| apache-2.0 | 3,666,093,671,625,131,000 | 40.632653 | 110 | 0.730392 | false |
mojwang/selenium | py/test/selenium/webdriver/common/w3c_interaction_tests.py | 6 | 6270 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.actions.action_builder import ActionBuilder
from selenium.webdriver.support.ui import WebDriverWait
def test_should_be_able_to_get_pointer_and_keyboard_inputs(driver, pages):
actions = ActionBuilder(driver)
pointers = actions.pointer_inputs
keyboards = actions.key_inputs
assert pointers is not None
assert keyboards is not None
def testSendingKeysToActiveElementWithModifier(driver, pages):
pages.load("formPage.html")
e = driver.find_element_by_id("working")
e.click()
actions = ActionBuilder(driver)
key_action = actions.key_action
key_action.key_down(Keys.SHIFT) \
.send_keys("abc") \
.key_up(Keys.SHIFT)
actions.perform()
assert "ABC" == e.get_attribute('value')
def test_can_create_pause_action_on_keyboard(driver, pages):
    # If we don't get an error and it takes less than 3 seconds to run, we are good
import datetime
start = datetime.datetime.now()
actions1 = ActionBuilder(driver)
key_actions = actions1.key_action
key_actions.pause(1)
actions1.perform()
finish = datetime.datetime.now()
assert (finish - start).seconds <= 3
# Add a filler step
actions2 = ActionBuilder(driver)
key_action = actions2.key_action
key_action.pause()
actions2.perform()
def test_can_create_pause_action_on_pointer(driver, pages):
# If we don't get an error and takes less than 3 seconds to run, we are good
import datetime
start = datetime.datetime.now()
actions1 = ActionBuilder(driver)
key_actions = actions1.pointer_action
key_actions.pause(1)
actions1.perform()
finish = datetime.datetime.now()
assert (finish - start).seconds <= 3
# Add a filler step
actions2 = ActionBuilder(driver)
key_action = actions2.pointer_action
key_action.pause()
actions2.perform()
def test_can_clear_actions(driver, pages):
actions = ActionBuilder(driver)
actions.clear_actions()
def test_move_and_click(driver, pages):
pages.load("javascriptPage.html")
toClick = driver.find_element_by_id("clickField")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.move_to(toClick) \
.click()
actions.perform()
assert "Clicked" == toClick.get_attribute('value')
def testDragAndDrop(driver, pages):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
element_available_timeout = 15
wait = WebDriverWait(driver, element_available_timeout)
pages.load("droppableItems.html")
wait.until(lambda dr: _isElementAvailable(driver, "draggable"))
if not _isElementAvailable(driver, "draggable"):
raise AssertionError("Could not find draggable element after 15 seconds.")
toDrag = driver.find_element_by_id("draggable")
dropInto = driver.find_element_by_id("droppable")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.click_and_hold(toDrag) \
.move_to(dropInto)\
.release()
actions.perform()
dropInto = driver.find_element_by_id("droppable")
text = dropInto.find_element_by_tag_name("p").text
assert "Dropped!" == text
def test_context_click(driver, pages):
pages.load("javascriptPage.html")
toContextClick = driver.find_element_by_id("doubleClickField")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.context_click(toContextClick)
actions.perform()
assert "ContextClicked" == toContextClick.get_attribute('value')
def test_double_click(driver, pages):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
pages.load("javascriptPage.html")
toDoubleClick = driver.find_element_by_id("doubleClickField")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.double_click(toDoubleClick)
actions.perform()
assert "DoubleClicked" == toDoubleClick.get_attribute('value')
def test_dragging_element_with_mouse_moves_it_to_another_list(driver, pages):
_performDragAndDropWithMouse(driver, pages)
dragInto = driver.find_element_by_id("sortable1")
assert 6 == len(dragInto.find_elements_by_tag_name("li"))
def test_dragging_element_with_mouse_fires_events(driver, pages):
_performDragAndDropWithMouse(driver, pages)
dragReporter = driver.find_element_by_id("dragging_reports")
assert "Nothing happened. DragOut DropIn RightItem 3" == dragReporter.text
def _performDragAndDropWithMouse(driver, pages):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
pages.load("draggableLists.html")
dragReporter = driver.find_element_by_id("dragging_reports")
toDrag = driver.find_element_by_id("rightitem-3")
dragInto = driver.find_element_by_id("sortable1")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.click_and_hold(toDrag) \
.move_to(driver.find_element_by_id("leftitem-4")) \
.move_to(dragInto) \
.release()
assert "Nothing happened." == dragReporter.text
actions.perform()
assert "Nothing happened. DragOut" in dragReporter.text
def _isElementAvailable(driver, id):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
try:
driver.find_element_by_id(id)
return True
except Exception:
return False
| apache-2.0 | -8,803,629,755,663,828,000 | 31.319588 | 82 | 0.712281 | false |
gdi2290/rethinkdb | scripts/generate_serialize_macros.py | 2 | 13439 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
import sys
"""This script is used to generate the RDB_MAKE_SERIALIZABLE_*() and
RDB_MAKE_ME_SERIALIZABLE_*() macro definitions. Because there are so
many variations, and because they are so similar, it's easier to just
have a Python script to generate them.
This script is meant to be run as follows (assuming you are in the
"rethinkdb/src" directory):
$ ../scripts/generate_serialize_macros.py > rpc/serialize_macros.hpp
"""
def generate_make_serializable_macro(nfields):
fields = "".join(", field%d" % (i + 1) for i in xrange(nfields))
print "#define RDB_MAKE_SERIALIZABLE_%d(type_t%s) \\" % \
(nfields, fields)
zeroarg = ("UNUSED " if nfields == 0 else "")
print " template <cluster_version_t W> \\"
print " void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
for i in xrange(nfields):
print " serialize<W>(wm, thing.field%d); \\" % (i + 1)
print " } \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print " extern int dont_use_RDB_MAKE_SERIALIZABLE_within_a_class_body"
print
print "#define RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % \
(nfields, fields)
zeroarg = ("UNUSED " if nfields == 0 else "")
print " template <> \\"
print " void serialize<cluster_version_t::CLUSTER>( \\"
print " %swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
for i in xrange(nfields):
print " serialize<cluster_version_t::CLUSTER>(wm, thing.field%d); \\" % (i + 1)
print " } \\"
print " template <> \\"
print " archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
print " %sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<cluster_version_t::CLUSTER>( \\"
print " s, deserialize_deref(thing->field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print " extern int dont_use_RDB_MAKE_SERIALIZABLE_FOR_CLUSTER_within_a_class_body"
print
# See the note in the comment below.
print "#define RDB_IMPL_SERIALIZABLE_%d(type_t%s) RDB_MAKE_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields, nfields, fields)
print
print "#define RDB_IMPL_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % (nfields, fields)
print " RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_FOR_CLUSTER(type_t);"
print
print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_13(type_t%s) \\" % (nfields, fields)
print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_SINCE_v1_13(type_t)"
print
print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_16(type_t%s) \\" % (nfields, fields)
print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_SINCE_v1_16(type_t)"
def generate_make_me_serializable_macro(nfields):
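    # Same field-by-field pattern as above, but emitted as member functions
    # (rdb_serialize / rdb_deserialize) for use inside a class body.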
print "#define RDB_MAKE_ME_SERIALIZABLE_%d(%s) \\" % \
(nfields, ", ".join("field%d" % (i + 1) for i in xrange(nfields)))
zeroarg = ("UNUSED " if nfields == 0 else "")
print " friend class write_message_t; \\"
print " template <cluster_version_t W> \\"
print " void rdb_serialize(%swrite_message_t *wm) const { \\" % zeroarg
for i in xrange(nfields):
print " serialize<W>(wm, field%d); \\" % (i + 1)
print " } \\"
print " template <cluster_version_t W> \\"
print " archive_result_t rdb_deserialize(%sread_stream_t *s) { \\" % zeroarg
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<W>(s, deserialize_deref(field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print " friend class archive_deserializer_t"
def generate_impl_me_serializable_macro(nfields):
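    # Out-of-line definitions of the member rdb_serialize/rdb_deserialize for a
    # named type, plus the cluster-only and since-v1.13 convenience variants.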
print "#define RDB_IMPL_ME_SERIALIZABLE_%d(typ%s) \\" % \
(nfields, "".join(", field%d" % (i + 1) for i in xrange(nfields)))
zeroarg = ("UNUSED " if nfields == 0 else "")
print " template <cluster_version_t W> \\"
print " void typ::rdb_serialize(%swrite_message_t *wm) const { \\" % zeroarg
for i in xrange(nfields):
print " serialize<W>(wm, field%d); \\" % (i + 1)
print " } \\"
print " template <cluster_version_t W> \\"
print " archive_result_t typ::rdb_deserialize(%sread_stream_t *s) { \\" % zeroarg
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<W>(s, deserialize_deref(field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print
print "#define RDB_IMPL_ME_SERIALIZABLE_%d_FOR_CLUSTER(typ%s) \\" % \
(nfields, "".join(", field%d" % (i + 1) for i in xrange(nfields)))
zeroarg = ("UNUSED " if nfields == 0 else "")
print " template <> \\"
print " void typ::rdb_serialize<cluster_version_t::CLUSTER>( \\"
print " %swrite_message_t *wm) const { \\" % zeroarg
for i in xrange(nfields):
print " serialize<cluster_version_t::CLUSTER>(wm, field%d); \\" % (i + 1)
print " } \\"
print " template <> \\"
print " archive_result_t typ::rdb_deserialize<cluster_version_t::CLUSTER>( \\"
print " %sread_stream_t *s) { \\" % zeroarg
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<cluster_version_t::CLUSTER>( \\"
print " s, deserialize_deref(field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print " INSTANTIATE_SERIALIZABLE_FOR_CLUSTER(typ)"
print
print "#define RDB_IMPL_ME_SERIALIZABLE_%d_SINCE_v1_13(typ%s) \\" % \
(nfields, "".join(", field%d" % (i + 1) for i in xrange(nfields)))
print " RDB_IMPL_ME_SERIALIZABLE_%d(typ%s); \\" % \
(nfields, "".join(", field%d" % (i + 1) for i in xrange(nfields)))
print " INSTANTIATE_SERIALIZABLE_SELF_SINCE_v1_13(typ)"
if __name__ == "__main__":
print "// Copyright 2010-2014 RethinkDB, all rights reserved."
print "#ifndef RPC_SERIALIZE_MACROS_HPP_"
print "#define RPC_SERIALIZE_MACROS_HPP_"
print
print "/* This file is automatically generated by '%s'." % " ".join(sys.argv)
print "Please modify '%s' instead of modifying this file.*/" % sys.argv[0]
print
print "#include <type_traits>"
print
print "#include \"containers/archive/archive.hpp\""
print "#include \"containers/archive/versioned.hpp\""
print "#include \"errors.hpp\""
print "#include \"version.hpp\""
print
print """
/* The purpose of these macros is to make it easier to serialize and
unserialize data types that consist of a simple series of fields, each
of which is serializable. Suppose we have a type "struct point_t {
int32_t x, y; }" that we want to be able to serialize. To make it
serializable automatically, either write
RDB_MAKE_SERIALIZABLE_2(point_t, x, y) at the global scope, or write
RDB_MAKE_ME_SERIALIZABLE_2(x, y) within the body of the point_t type and
RDB_SERIALIZE_OUTSIDE(point_t) in the global scope.
The _FOR_CLUSTER variants of the macros exist to indicate that a type
can only be serialized for use within the cluster, and thus should not be
serialized to disk.
The _SINCE_v1_13 variants of the macros exist to make the conversion to
versioned serialization easier. They must only be used for types whose
serialization format has not changed since version 1.13.0.
Once the format changes, you can still use the macros without
the _SINCE_v1_13 suffix and instantiate the serialize() and deserialize()
functions explicitly for a certain version.
We use dummy "extern int" declarations to force a compile error in
macros that should not be used inside of class bodies. */
""".strip()
print "namespace helper {"
print
print "/* When a `static_assert` is used within a templated class or function,"
print " * but does not depend on any template parameters the C++ compiler is free"
print " * to evaluate the assert even before instantiating that template. This"
print " * helper class allows a `static_assert(false, ...)` to depend on the"
print " * `cluster_version_t` template parameter."
print " * Also see http://stackoverflow.com/a/14637534. */"
print "template <cluster_version_t W>"
print "struct always_false"
print " : std::false_type { };"
print
print "} // namespace helper"
print
print "#define RDB_DECLARE_SERIALIZABLE(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(read_stream_t *s, type_t *thing)"
print
print "#define RDB_DECLARE_SERIALIZABLE_FOR_CLUSTER(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *, const type_t &) { \\"
print " static_assert(helper::always_false<W>::value, \\"
print " \"This type is only serializable for cluster.\"); \\"
print " unreachable(); \\"
print " } \\"
print " template <> \\"
print " void serialize<cluster_version_t::CLUSTER>( \\"
print " write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(read_stream_t *, type_t *) { \\"
print " static_assert(helper::always_false<W>::value, \\"
print " \"This type is only deserializable for cluster.\"); \\"
print " unreachable(); \\"
print " } \\"
print " template <> \\"
print " archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
print " read_stream_t *s, type_t *thing)"
print
print "#define RDB_DECLARE_ME_SERIALIZABLE \\"
print " friend class write_message_t; \\"
print " template <cluster_version_t W> \\"
print " void rdb_serialize(write_message_t *wm) const; \\"
print " friend class archive_deserializer_t; \\"
print " template <cluster_version_t W> \\"
print " archive_result_t rdb_deserialize(read_stream_t *s)"
print
print "#define RDB_SERIALIZE_OUTSIDE(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *wm, const type_t &thing) { \\"
print " thing.template rdb_serialize<W>(wm); \\"
print " } \\"
print " template <cluster_version_t W> \\"
print " MUST_USE archive_result_t deserialize(read_stream_t *s, type_t *thing) { \\"
print " return thing->template rdb_deserialize<W>(s); \\"
print " } \\"
print " extern int dont_use_RDB_SERIALIZE_OUTSIDE_within_a_class_body"
print
print "#define RDB_SERIALIZE_TEMPLATED_OUTSIDE(type_t) \\"
print " template <cluster_version_t W, class T> \\"
print " void serialize(write_message_t *wm, const type_t<T> &thing) { \\"
print " thing.template rdb_serialize<W>(wm); \\"
print " } \\"
print " template <cluster_version_t W, class T> \\"
print " MUST_USE archive_result_t deserialize(read_stream_t *s, type_t<T> *thing) { \\"
print " return thing->template rdb_deserialize<W>(s); \\"
print " } \\"
print " extern int dont_use_RDB_SERIALIZE_OUTSIDE_within_a_class_body"
print
print "#define RDB_SERIALIZE_TEMPLATED_2_OUTSIDE(type_t) \\"
print " template <cluster_version_t W, class T, class U> \\"
print " void serialize(write_message_t *wm, const type_t<T, U> &thing) { \\"
print " thing.template rdb_serialize<W>(wm); \\"
print " } \\"
print " template <cluster_version_t W, class T, class U> \\"
print " MUST_USE archive_result_t deserialize(read_stream_t *s, type_t<T, U> *thing) { \\"
print " return thing->template rdb_deserialize<W>(s); \\"
print " } \\"
print " extern int dont_use_RDB_SERIALIZE_OUTSIDE_within_a_class_body"
print
for nfields in xrange(20):
generate_make_serializable_macro(nfields)
print
generate_make_me_serializable_macro(nfields)
print
generate_impl_me_serializable_macro(nfields)
print
print "#endif // RPC_SERIALIZE_MACROS_HPP_"
| agpl-3.0 | 6,451,273,809,676,396,000 | 47.692029 | 130 | 0.603393 | false |
GinnyN/towerofdimensions-django | tests/regressiontests/localflavor/pl/tests.py | 33 | 22496 | from django.contrib.localflavor.pl.forms import (PLProvinceSelect,
PLCountySelect, PLPostalCodeField, PLNIPField, PLPESELField, PLNationalIDCardNumberField, PLREGONField)
from django.test import SimpleTestCase
class PLLocalFlavorTests(SimpleTestCase):
def test_PLProvinceSelect(self):
f = PLProvinceSelect()
out = u'''<select name="voivodeships">
<option value="lower_silesia">Lower Silesia</option>
<option value="kuyavia-pomerania">Kuyavia-Pomerania</option>
<option value="lublin">Lublin</option>
<option value="lubusz">Lubusz</option>
<option value="lodz">Lodz</option>
<option value="lesser_poland">Lesser Poland</option>
<option value="masovia">Masovia</option>
<option value="opole">Opole</option>
<option value="subcarpatia">Subcarpatia</option>
<option value="podlasie">Podlasie</option>
<option value="pomerania" selected="selected">Pomerania</option>
<option value="silesia">Silesia</option>
<option value="swietokrzyskie">Swietokrzyskie</option>
<option value="warmia-masuria">Warmia-Masuria</option>
<option value="greater_poland">Greater Poland</option>
<option value="west_pomerania">West Pomerania</option>
</select>'''
self.assertHTMLEqual(f.render('voivodeships', 'pomerania'), out)
def test_PLCountySelect(self):
f = PLCountySelect()
out = u'''<select name="administrativeunit">
<option value="wroclaw">Wroc\u0142aw</option>
<option value="jeleniagora">Jelenia G\xf3ra</option>
<option value="legnica">Legnica</option>
<option value="boleslawiecki">boles\u0142awiecki</option>
<option value="dzierzoniowski">dzier\u017coniowski</option>
<option value="glogowski">g\u0142ogowski</option>
<option value="gorowski">g\xf3rowski</option>
<option value="jaworski">jaworski</option>
<option value="jeleniogorski">jeleniog\xf3rski</option>
<option value="kamiennogorski">kamiennog\xf3rski</option>
<option value="klodzki">k\u0142odzki</option>
<option value="legnicki">legnicki</option>
<option value="lubanski">luba\u0144ski</option>
<option value="lubinski">lubi\u0144ski</option>
<option value="lwowecki">lw\xf3wecki</option>
<option value="milicki">milicki</option>
<option value="olesnicki">ole\u015bnicki</option>
<option value="olawski">o\u0142awski</option>
<option value="polkowicki">polkowicki</option>
<option value="strzelinski">strzeli\u0144ski</option>
<option value="sredzki">\u015bredzki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="trzebnicki">trzebnicki</option>
<option value="walbrzyski">wa\u0142brzyski</option>
<option value="wolowski">wo\u0142owski</option>
<option value="wroclawski">wroc\u0142awski</option>
<option value="zabkowicki">z\u0105bkowicki</option>
<option value="zgorzelecki">zgorzelecki</option>
<option value="zlotoryjski">z\u0142otoryjski</option>
<option value="bydgoszcz">Bydgoszcz</option>
<option value="torun">Toru\u0144</option>
<option value="wloclawek">W\u0142oc\u0142awek</option>
<option value="grudziadz">Grudzi\u0105dz</option>
<option value="aleksandrowski">aleksandrowski</option>
<option value="brodnicki">brodnicki</option>
<option value="bydgoski">bydgoski</option>
<option value="chelminski">che\u0142mi\u0144ski</option>
<option value="golubsko-dobrzynski">golubsko-dobrzy\u0144ski</option>
<option value="grudziadzki">grudzi\u0105dzki</option>
<option value="inowroclawski">inowroc\u0142awski</option>
<option value="lipnowski">lipnowski</option>
<option value="mogilenski">mogile\u0144ski</option>
<option value="nakielski">nakielski</option>
<option value="radziejowski">radziejowski</option>
<option value="rypinski">rypi\u0144ski</option>
<option value="sepolenski">s\u0119pole\u0144ski</option>
<option value="swiecki">\u015bwiecki</option>
<option value="torunski">toru\u0144ski</option>
<option value="tucholski">tucholski</option>
<option value="wabrzeski">w\u0105brzeski</option>
<option value="wloclawski">wroc\u0142awski</option>
<option value="zninski">\u017ani\u0144ski</option>
<option value="lublin">Lublin</option>
<option value="biala-podlaska">Bia\u0142a Podlaska</option>
<option value="chelm">Che\u0142m</option>
<option value="zamosc">Zamo\u015b\u0107</option>
<option value="bialski">bialski</option>
<option value="bilgorajski">bi\u0142gorajski</option>
<option value="chelmski">che\u0142mski</option>
<option value="hrubieszowski">hrubieszowski</option>
<option value="janowski">janowski</option>
<option value="krasnostawski">krasnostawski</option>
<option value="krasnicki">kra\u015bnicki</option>
<option value="lubartowski">lubartowski</option>
<option value="lubelski">lubelski</option>
<option value="leczynski">\u0142\u0119czy\u0144ski</option>
<option value="lukowski">\u0142ukowski</option>
<option value="opolski">opolski</option>
<option value="parczewski">parczewski</option>
<option value="pulawski">pu\u0142awski</option>
<option value="radzynski">radzy\u0144ski</option>
<option value="rycki">rycki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wlodawski">w\u0142odawski</option>
<option value="zamojski">zamojski</option>
<option value="gorzow-wielkopolski">Gorz\xf3w Wielkopolski</option>
<option value="zielona-gora">Zielona G\xf3ra</option>
<option value="gorzowski">gorzowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="miedzyrzecki">mi\u0119dzyrzecki</option>
<option value="nowosolski">nowosolski</option>
<option value="slubicki">s\u0142ubicki</option>
<option value="strzelecko-drezdenecki">strzelecko-drezdenecki</option>
<option value="sulecinski">sule\u0144ci\u0144ski</option>
<option value="swiebodzinski">\u015bwiebodzi\u0144ski</option>
<option value="wschowski">wschowski</option>
<option value="zielonogorski">zielonog\xf3rski</option>
<option value="zaganski">\u017caga\u0144ski</option>
<option value="zarski">\u017carski</option>
<option value="lodz">\u0141\xf3d\u017a</option>
<option value="piotrkow-trybunalski">Piotrk\xf3w Trybunalski</option>
<option value="skierniewice">Skierniewice</option>
<option value="belchatowski">be\u0142chatowski</option>
<option value="brzezinski">brzezi\u0144ski</option>
<option value="kutnowski">kutnowski</option>
<option value="laski">\u0142aski</option>
<option value="leczycki">\u0142\u0119czycki</option>
<option value="lowicki">\u0142owicki</option>
<option value="lodzki wschodni">\u0142\xf3dzki wschodni</option>
<option value="opoczynski">opoczy\u0144ski</option>
<option value="pabianicki">pabianicki</option>
<option value="pajeczanski">paj\u0119cza\u0144ski</option>
<option value="piotrkowski">piotrkowski</option>
<option value="poddebicki">podd\u0119bicki</option>
<option value="radomszczanski">radomszcza\u0144ski</option>
<option value="rawski">rawski</option>
<option value="sieradzki">sieradzki</option>
<option value="skierniewicki">skierniewicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wielunski">wielu\u0144ski</option>
<option value="wieruszowski">wieruszowski</option>
<option value="zdunskowolski">zdu\u0144skowolski</option>
<option value="zgierski">zgierski</option>
<option value="krakow">Krak\xf3w</option>
<option value="tarnow">Tarn\xf3w</option>
<option value="nowy-sacz">Nowy S\u0105cz</option>
<option value="bochenski">boche\u0144ski</option>
<option value="brzeski">brzeski</option>
<option value="chrzanowski">chrzanowski</option>
<option value="dabrowski">d\u0105browski</option>
<option value="gorlicki">gorlicki</option>
<option value="krakowski">krakowski</option>
<option value="limanowski">limanowski</option>
<option value="miechowski">miechowski</option>
<option value="myslenicki">my\u015blenicki</option>
<option value="nowosadecki">nowos\u0105decki</option>
<option value="nowotarski">nowotarski</option>
<option value="olkuski">olkuski</option>
<option value="oswiecimski">o\u015bwi\u0119cimski</option>
<option value="proszowicki">proszowicki</option>
<option value="suski">suski</option>
<option value="tarnowski">tarnowski</option>
<option value="tatrzanski">tatrza\u0144ski</option>
<option value="wadowicki">wadowicki</option>
<option value="wielicki">wielicki</option>
<option value="warszawa">Warszawa</option>
<option value="ostroleka">Ostro\u0142\u0119ka</option>
<option value="plock">P\u0142ock</option>
<option value="radom">Radom</option>
<option value="siedlce">Siedlce</option>
<option value="bialobrzeski">bia\u0142obrzeski</option>
<option value="ciechanowski">ciechanowski</option>
<option value="garwolinski">garwoli\u0144ski</option>
<option value="gostyninski">gostyni\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="grojecki">gr\xf3jecki</option>
<option value="kozienicki">kozenicki</option>
<option value="legionowski">legionowski</option>
<option value="lipski">lipski</option>
<option value="losicki">\u0142osicki</option>
<option value="makowski">makowski</option>
<option value="minski">mi\u0144ski</option>
<option value="mlawski">m\u0142awski</option>
<option value="nowodworski">nowodworski</option>
<option value="ostrolecki">ostro\u0142\u0119cki</option>
<option value="ostrowski">ostrowski</option>
<option value="otwocki">otwocki</option>
<option value="piaseczynski">piaseczy\u0144ski</option>
<option value="plocki">p\u0142ocki</option>
<option value="plonski">p\u0142o\u0144ski</option>
<option value="pruszkowski">pruszkowski</option>
<option value="przasnyski">przasnyski</option>
<option value="przysuski">przysuski</option>
<option value="pultuski">pu\u0142tuski</option>
<option value="radomski">radomski</option>
<option value="siedlecki">siedlecki</option>
<option value="sierpecki">sierpecki</option>
<option value="sochaczewski">sochaczewski</option>
<option value="sokolowski">soko\u0142owski</option>
<option value="szydlowiecki">szyd\u0142owiecki</option>
<option value="warszawski-zachodni">warszawski zachodni</option>
<option value="wegrowski">w\u0119growski</option>
<option value="wolominski">wo\u0142omi\u0144ski</option>
<option value="wyszkowski">wyszkowski</option>
<option value="zwolenski">zwole\u0144ski</option>
<option value="zurominski">\u017curomi\u0144ski</option>
<option value="zyrardowski">\u017cyrardowski</option>
<option value="opole">Opole</option>
<option value="brzeski">brzeski</option>
<option value="glubczycki">g\u0142ubczyski</option>
<option value="kedzierzynsko-kozielski">k\u0119dzierzy\u0144ski-kozielski</option>
<option value="kluczborski">kluczborski</option>
<option value="krapkowicki">krapkowicki</option>
<option value="namyslowski">namys\u0142owski</option>
<option value="nyski">nyski</option>
<option value="oleski">oleski</option>
<option value="opolski">opolski</option>
<option value="prudnicki">prudnicki</option>
<option value="strzelecki">strzelecki</option>
<option value="rzeszow">Rzesz\xf3w</option>
<option value="krosno">Krosno</option>
<option value="przemysl">Przemy\u015bl</option>
<option value="tarnobrzeg">Tarnobrzeg</option>
<option value="bieszczadzki">bieszczadzki</option>
<option value="brzozowski">brzozowski</option>
<option value="debicki">d\u0119bicki</option>
<option value="jaroslawski">jaros\u0142awski</option>
<option value="jasielski">jasielski</option>
<option value="kolbuszowski">kolbuszowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="leski">leski</option>
<option value="lezajski">le\u017cajski</option>
<option value="lubaczowski">lubaczowski</option>
<option value="lancucki">\u0142a\u0144cucki</option>
<option value="mielecki">mielecki</option>
<option value="nizanski">ni\u017ca\u0144ski</option>
<option value="przemyski">przemyski</option>
<option value="przeworski">przeworski</option>
<option value="ropczycko-sedziszowski">ropczycko-s\u0119dziszowski</option>
<option value="rzeszowski">rzeszowski</option>
<option value="sanocki">sanocki</option>
<option value="stalowowolski">stalowowolski</option>
<option value="strzyzowski">strzy\u017cowski</option>
<option value="tarnobrzeski">tarnobrzeski</option>
<option value="bialystok">Bia\u0142ystok</option>
<option value="lomza">\u0141om\u017ca</option>
<option value="suwalki">Suwa\u0142ki</option>
<option value="augustowski">augustowski</option>
<option value="bialostocki">bia\u0142ostocki</option>
<option value="bielski">bielski</option>
<option value="grajewski">grajewski</option>
<option value="hajnowski">hajnowski</option>
<option value="kolnenski">kolne\u0144ski</option>
<option value="\u0142omzynski">\u0142om\u017cy\u0144ski</option>
<option value="moniecki">moniecki</option>
<option value="sejnenski">sejne\u0144ski</option>
<option value="siemiatycki">siematycki</option>
<option value="sokolski">sok\xf3lski</option>
<option value="suwalski">suwalski</option>
<option value="wysokomazowiecki">wysokomazowiecki</option>
<option value="zambrowski">zambrowski</option>
<option value="gdansk">Gda\u0144sk</option>
<option value="gdynia">Gdynia</option>
<option value="slupsk">S\u0142upsk</option>
<option value="sopot">Sopot</option>
<option value="bytowski">bytowski</option>
<option value="chojnicki">chojnicki</option>
<option value="czluchowski">cz\u0142uchowski</option>
<option value="kartuski">kartuski</option>
<option value="koscierski">ko\u015bcierski</option>
<option value="kwidzynski">kwidzy\u0144ski</option>
<option value="leborski">l\u0119borski</option>
<option value="malborski">malborski</option>
<option value="nowodworski">nowodworski</option>
<option value="gdanski">gda\u0144ski</option>
<option value="pucki">pucki</option>
<option value="slupski">s\u0142upski</option>
<option value="starogardzki">starogardzki</option>
<option value="sztumski">sztumski</option>
<option value="tczewski">tczewski</option>
<option value="wejherowski">wejcherowski</option>
<option value="katowice" selected="selected">Katowice</option>
<option value="bielsko-biala">Bielsko-Bia\u0142a</option>
<option value="bytom">Bytom</option>
<option value="chorzow">Chorz\xf3w</option>
<option value="czestochowa">Cz\u0119stochowa</option>
<option value="dabrowa-gornicza">D\u0105browa G\xf3rnicza</option>
<option value="gliwice">Gliwice</option>
<option value="jastrzebie-zdroj">Jastrz\u0119bie Zdr\xf3j</option>
<option value="jaworzno">Jaworzno</option>
<option value="myslowice">Mys\u0142owice</option>
<option value="piekary-slaskie">Piekary \u015al\u0105skie</option>
<option value="ruda-slaska">Ruda \u015al\u0105ska</option>
<option value="rybnik">Rybnik</option>
<option value="siemianowice-slaskie">Siemianowice \u015al\u0105skie</option>
<option value="sosnowiec">Sosnowiec</option>
<option value="swietochlowice">\u015awi\u0119toch\u0142owice</option>
<option value="tychy">Tychy</option>
<option value="zabrze">Zabrze</option>
<option value="zory">\u017bory</option>
<option value="bedzinski">b\u0119dzi\u0144ski</option>
<option value="bielski">bielski</option>
<option value="bierunsko-ledzinski">bieru\u0144sko-l\u0119dzi\u0144ski</option>
<option value="cieszynski">cieszy\u0144ski</option>
<option value="czestochowski">cz\u0119stochowski</option>
<option value="gliwicki">gliwicki</option>
<option value="klobucki">k\u0142obucki</option>
<option value="lubliniecki">lubliniecki</option>
<option value="mikolowski">miko\u0142owski</option>
<option value="myszkowski">myszkowski</option>
<option value="pszczynski">pszczy\u0144ski</option>
<option value="raciborski">raciborski</option>
<option value="rybnicki">rybnicki</option>
<option value="tarnogorski">tarnog\xf3rski</option>
<option value="wodzislawski">wodzis\u0142awski</option>
<option value="zawiercianski">zawiercia\u0144ski</option>
<option value="zywiecki">\u017cywiecki</option>
<option value="kielce">Kielce</option>
<option value="buski">buski</option>
<option value="jedrzejowski">j\u0119drzejowski</option>
<option value="kazimierski">kazimierski</option>
<option value="kielecki">kielecki</option>
<option value="konecki">konecki</option>
<option value="opatowski">opatowski</option>
<option value="ostrowiecki">ostrowiecki</option>
<option value="pinczowski">pi\u0144czowski</option>
<option value="sandomierski">sandomierski</option>
<option value="skarzyski">skar\u017cyski</option>
<option value="starachowicki">starachowicki</option>
<option value="staszowski">staszowski</option>
<option value="wloszczowski">w\u0142oszczowski</option>
<option value="olsztyn">Olsztyn</option>
<option value="elblag">Elbl\u0105g</option>
<option value="bartoszycki">bartoszycki</option>
<option value="braniewski">braniewski</option>
<option value="dzialdowski">dzia\u0142dowski</option>
<option value="elblaski">elbl\u0105ski</option>
<option value="elcki">e\u0142cki</option>
<option value="gizycki">gi\u017cycki</option>
<option value="goldapski">go\u0142dapski</option>
<option value="ilawski">i\u0142awski</option>
<option value="ketrzynski">k\u0119trzy\u0144ski</option>
<option value="lidzbarski">lidzbarski</option>
<option value="mragowski">mr\u0105gowski</option>
<option value="nidzicki">nidzicki</option>
<option value="nowomiejski">nowomiejski</option>
<option value="olecki">olecki</option>
<option value="olsztynski">olszty\u0144ski</option>
<option value="ostrodzki">ostr\xf3dzki</option>
<option value="piski">piski</option>
<option value="szczycienski">szczycie\u0144ski</option>
<option value="wegorzewski">w\u0119gorzewski</option>
<option value="poznan">Pozna\u0144</option>
<option value="kalisz">Kalisz</option>
<option value="konin">Konin</option>
<option value="leszno">Leszno</option>
<option value="chodzieski">chodziejski</option>
<option value="czarnkowsko-trzcianecki">czarnkowsko-trzcianecki</option>
<option value="gnieznienski">gnie\u017anie\u0144ski</option>
<option value="gostynski">gosty\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="jarocinski">jaroci\u0144ski</option>
<option value="kaliski">kaliski</option>
<option value="kepinski">k\u0119pi\u0144ski</option>
<option value="kolski">kolski</option>
<option value="koninski">koni\u0144ski</option>
<option value="koscianski">ko\u015bcia\u0144ski</option>
<option value="krotoszynski">krotoszy\u0144ski</option>
<option value="leszczynski">leszczy\u0144ski</option>
<option value="miedzychodzki">mi\u0119dzychodzki</option>
<option value="nowotomyski">nowotomyski</option>
<option value="obornicki">obornicki</option>
<option value="ostrowski">ostrowski</option>
<option value="ostrzeszowski">ostrzeszowski</option>
<option value="pilski">pilski</option>
<option value="pleszewski">pleszewski</option>
<option value="poznanski">pozna\u0144ski</option>
<option value="rawicki">rawicki</option>
<option value="slupecki">s\u0142upecki</option>
<option value="szamotulski">szamotulski</option>
<option value="sredzki">\u015bredzki</option>
<option value="sremski">\u015bremski</option>
<option value="turecki">turecki</option>
<option value="wagrowiecki">w\u0105growiecki</option>
<option value="wolsztynski">wolszty\u0144ski</option>
<option value="wrzesinski">wrzesi\u0144ski</option>
<option value="zlotowski">z\u0142otowski</option>
<option value="bialogardzki">bia\u0142ogardzki</option>
<option value="choszczenski">choszcze\u0144ski</option>
<option value="drawski">drawski</option>
<option value="goleniowski">goleniowski</option>
<option value="gryficki">gryficki</option>
<option value="gryfinski">gryfi\u0144ski</option>
<option value="kamienski">kamie\u0144ski</option>
<option value="kolobrzeski">ko\u0142obrzeski</option>
<option value="koszalinski">koszali\u0144ski</option>
<option value="lobeski">\u0142obeski</option>
<option value="mysliborski">my\u015bliborski</option>
<option value="policki">policki</option>
<option value="pyrzycki">pyrzycki</option>
<option value="slawienski">s\u0142awie\u0144ski</option>
<option value="stargardzki">stargardzki</option>
<option value="szczecinecki">szczecinecki</option>
<option value="swidwinski">\u015bwidwi\u0144ski</option>
<option value="walecki">wa\u0142ecki</option>
</select>'''
self.assertHTMLEqual(f.render('administrativeunit', 'katowice'), out)
def test_PLPostalCodeField(self):
error_format = [u'Enter a postal code in the format XX-XXX.']
valid = {
'41-403': '41-403',
}
invalid = {
'43--434': error_format,
}
self.assertFieldOutput(PLPostalCodeField, valid, invalid)
def test_PLNIPField(self):
error_format = [u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX, XXX-XX-XX-XXX or XXXXXXXXXX.']
error_checksum = [u'Wrong checksum for the Tax Number (NIP).']
valid = {
'646-241-41-24': '6462414124',
'646-24-14-124': '6462414124',
'6462414124': '6462414124',
}
invalid = {
'43-343-234-323': error_format,
'64-62-414-124': error_format,
'646-241-41-23': error_checksum,
}
self.assertFieldOutput(PLNIPField, valid, invalid)
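# Illustrative sketch (comment only, not part of the original test file):
# the NIP checksum that PLNIPField enforces weights the first nine digits
# with (6, 5, 7, 2, 3, 4, 5, 6, 7) and compares the sum mod 11 against the
# tenth digit. For '6462414124' the weighted sum is 158 and 158 % 11 == 4,
# the final digit, so it validates; '646-241-41-23' breaks the equality and
# triggers the error_checksum message above.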
def test_PLPESELField(self):
error_checksum = [u'Wrong checksum for the National Identification Number.']
error_format = [u'National Identification Number consists of 11 digits.']
valid = {
'80071610614': '80071610614',
}
invalid = {
'80071610610': error_checksum,
'80': error_format,
'800716106AA': error_format,
}
self.assertFieldOutput(PLPESELField, valid, invalid)
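# Illustrative sketch (comment only): the PESEL check digit is
# (10 - S % 10) % 10, where S weights the first ten digits with
# (1, 3, 7, 9, 1, 3, 7, 9, 1, 3). For '80071610614', S == 106, giving
# (10 - 6) % 10 == 4, which matches the last digit; '80071610610' does not,
# hence the error_checksum expectation above.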
def test_PLNationalIDCardNumberField(self):
error_checksum = [u'Wrong checksum for the National ID Card Number.']
error_format = [u'National ID Card Number consists of 3 letters and 6 digits.']
valid = {
'ABC123458': 'ABC123458',
'abc123458': 'ABC123458',
}
invalid = {
'ABC123457': error_checksum,
'abc123457': error_checksum,
'a12Aaaaaa': error_format,
'AA1234443': error_format,
}
self.assertFieldOutput(PLNationalIDCardNumberField, valid, invalid)
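# Illustrative sketch (comment only): the ID card check maps letters to
# values (A=10 ... Z=35), weights the nine characters with
# (7, 3, 1, 9, 7, 3, 1, 7, 3) and requires the total to be divisible by 10;
# the digit in position four (weight 9) acts as the check digit. For
# 'ABC123458': 10*7 + 11*3 + 12*1 + 1*9 + 2*7 + 3*3 + 4*1 + 5*7 + 8*3 == 210,
# which is divisible by 10, so it validates, while 'ABC123457' sums to 207
# and is rejected.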
def test_PLREGONField(self):
error_checksum = [u'Wrong checksum for the National Business Register Number (REGON).']
error_format = [u'National Business Register Number (REGON) consists of 9 or 14 digits.']
valid = {
'12345678512347': '12345678512347',
'590096454': '590096454',
}
invalid = {
'123456784': error_checksum,
'12345678412342': error_checksum,
'590096453': error_checksum,
'590096': error_format,
}
self.assertFieldOutput(PLREGONField, valid, invalid)
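# Illustrative sketch (comment only): a 9-digit REGON weights its first
# eight digits with (8, 9, 2, 3, 4, 5, 6, 7); the sum mod 11 (with 10
# treated as 0) must equal the ninth digit. For '590096454' the sum is 246
# and 246 % 11 == 4, so it passes, while '590096453' fails. The 14-digit
# form applies the analogous check over the first thirteen digits with
# weights (2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8).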
| bsd-3-clause | -7,811,433,784,382,097,000 | 46.062762 | 116 | 0.755201 | false |
nimeshkumar11/Implementation-of-Adaptive-CoDel-in-ns-3 | src/flow-monitor/bindings/modulegen__gcc_ILP32.py | 14 | 453979 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
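# Returning True tells pybindgen the error has been handled, so the
# offending wrapper is skipped (with the warning above) instead of
# aborting the whole code-generation run.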
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.flow_monitor', cpp_namespace='::ns3')
return root_module
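# Illustrative sketch (comment only): modulegen scripts like this one are
# typically driven at the bottom of the file by something of the shape
#     def main():
#         out = FileCodeSink(sys.stdout)
#         root_module = module_init()
#         register_types(root_module)
#         root_module.generate(out)
# so module_init() and register_types() only build the in-memory API model;
# the actual C++ binding code is emitted by the final Module.generate() call.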
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper [class]
module.add_class('FlowMonitorHelper')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## histogram.h (module 'flow-monitor'): ns3::Histogram [class]
module.add_class('Histogram')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress [class]
module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e [enumeration]
module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e [enumeration]
module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class]
module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration]
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration]
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
## ipv6-header.h (module 'internet'): ns3::Ipv6Header [class]
module.add_class('Ipv6Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::DscpType [enumeration]
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet')
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::NextHeader_e [enumeration]
module.add_enum('NextHeader_e', ['IPV6_EXT_HOP_BY_HOP', 'IPV6_IPV4', 'IPV6_TCP', 'IPV6_UDP', 'IPV6_IPV6', 'IPV6_EXT_ROUTING', 'IPV6_EXT_FRAGMENTATION', 'IPV6_EXT_CONFIDENTIALITY', 'IPV6_EXT_AUTHENTIFICATION', 'IPV6_ICMPV6', 'IPV6_EXT_END', 'IPV6_EXT_DESTINATION', 'IPV6_SCTP', 'IPV6_EXT_MOBILITY', 'IPV6_UDP_LITE'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet')
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::EcnType [enumeration]
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FlowClassifier', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FlowClassifier>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## socket.h (module 'network'): ns3::Socket [class]
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::Socket::SocketPriority [enumeration]
module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::Socket::Ipv6MulticastFilterMode [enumeration]
module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::SocketIpTosTag [class]
module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag [class]
module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketIpv6TclassTag [class]
module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketPriorityTag [class]
module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## flow-classifier.h (module 'flow-monitor'): ns3::FlowClassifier [class]
module.add_class('FlowClassifier', parent=root_module['ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >'])
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor [class]
module.add_class('FlowMonitor', parent=root_module['ns3::Object'])
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats [struct]
module.add_class('FlowStats', outer_class=root_module['ns3::FlowMonitor'])
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe [class]
module.add_class('FlowProbe', parent=root_module['ns3::Object'])
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats [struct]
module.add_class('FlowStats', outer_class=root_module['ns3::FlowProbe'])
## ipv4.h (module 'internet'): ns3::Ipv4 [class]
module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier [class]
module.add_class('Ipv4FlowClassifier', parent=root_module['ns3::FlowClassifier'])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple [struct]
module.add_class('FiveTuple', outer_class=root_module['ns3::Ipv4FlowClassifier'])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::SortByCount [class]
module.add_class('SortByCount', outer_class=root_module['ns3::Ipv4FlowClassifier'])
## ipv4-flow-probe.h (module 'flow-monitor'): ns3::Ipv4FlowProbe [class]
module.add_class('Ipv4FlowProbe', parent=root_module['ns3::FlowProbe'])
## ipv4-flow-probe.h (module 'flow-monitor'): ns3::Ipv4FlowProbe::DropReason [enumeration]
module.add_enum('DropReason', ['DROP_NO_ROUTE', 'DROP_TTL_EXPIRE', 'DROP_BAD_CHECKSUM', 'DROP_QUEUE', 'DROP_QUEUE_DISC', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT', 'DROP_INVALID_REASON'], outer_class=root_module['ns3::Ipv4FlowProbe'])
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class]
module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration]
module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class]
module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class]
module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class]
module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
## ipv6.h (module 'internet'): ns3::Ipv6 [class]
module.add_class('Ipv6', import_from_module='ns.internet', parent=root_module['ns3::Object'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier [class]
module.add_class('Ipv6FlowClassifier', parent=root_module['ns3::FlowClassifier'])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple [struct]
module.add_class('FiveTuple', outer_class=root_module['ns3::Ipv6FlowClassifier'])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::SortByCount [class]
module.add_class('SortByCount', outer_class=root_module['ns3::Ipv6FlowClassifier'])
## ipv6-flow-probe.h (module 'flow-monitor'): ns3::Ipv6FlowProbe [class]
module.add_class('Ipv6FlowProbe', parent=root_module['ns3::FlowProbe'])
## ipv6-flow-probe.h (module 'flow-monitor'): ns3::Ipv6FlowProbe::DropReason [enumeration]
module.add_enum('DropReason', ['DROP_NO_ROUTE', 'DROP_TTL_EXPIRE', 'DROP_BAD_CHECKSUM', 'DROP_QUEUE', 'DROP_QUEUE_DISC', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_UNKNOWN_PROTOCOL', 'DROP_UNKNOWN_OPTION', 'DROP_MALFORMED_HEADER', 'DROP_FRAGMENT_TIMEOUT', 'DROP_INVALID_REASON'], outer_class=root_module['ns3::Ipv6FlowProbe'])
## ipv6-l3-protocol.h (module 'internet'): ns3::Ipv6L3Protocol [class]
module.add_class('Ipv6L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv6'])
## ipv6-l3-protocol.h (module 'internet'): ns3::Ipv6L3Protocol::DropReason [enumeration]
module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_UNKNOWN_PROTOCOL', 'DROP_UNKNOWN_OPTION', 'DROP_MALFORMED_HEADER', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv6L3Protocol'], import_from_module='ns.internet')
## ipv6-pmtu-cache.h (module 'internet'): ns3::Ipv6PmtuCache [class]
module.add_class('Ipv6PmtuCache', import_from_module='ns.internet', parent=root_module['ns3::Object'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## queue-item.h (module 'network'): ns3::QueueItem [class]
module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
## queue-item.h (module 'network'): ns3::QueueItem::Uint8Values [enumeration]
module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## queue-item.h (module 'network'): ns3::QueueDiscItem [class]
module.add_class('QueueDiscItem', import_from_module='ns.network', parent=root_module['ns3::QueueItem'])
module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
module.add_container('std::vector< unsigned int >', 'unsigned int', container_type=u'vector')
module.add_container('std::vector< unsigned long long >', 'long long unsigned int', container_type=u'vector')
module.add_container('std::map< unsigned int, ns3::FlowMonitor::FlowStats >', ('unsigned int', 'ns3::FlowMonitor::FlowStats'), container_type=u'map')
module.add_container('std::vector< ns3::Ptr< ns3::FlowProbe > >', 'ns3::Ptr< ns3::FlowProbe >', container_type=u'vector')
module.add_container('std::map< unsigned int, ns3::FlowProbe::FlowStats >', ('unsigned int', 'ns3::FlowProbe::FlowStats'), container_type=u'map')
module.add_container('std::vector< std::pair< ns3::Ipv4Header::DscpType, unsigned int > >', 'std::pair< ns3::Ipv4Header::DscpType, unsigned int >', container_type=u'vector')
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
module.add_container('std::vector< std::pair< ns3::Ipv6Header::DscpType, unsigned int > >', 'std::pair< ns3::Ipv6Header::DscpType, unsigned int >', container_type=u'vector')
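    ## typedefs from the flow-monitor headers; each alias is registered in
    ## value, pointer, and reference form so pybindgen can marshal all three usages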
typehandlers.add_type_alias(u'uint32_t', u'ns3::FlowPacketId')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::FlowPacketId*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::FlowPacketId&')
typehandlers.add_type_alias(u'uint32_t', u'ns3::FlowId')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::FlowId*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::FlowId&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
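## Below: one type-registration helper per nested C++ namespace; each helper
## receives the sub-module created by add_cpp_namespace() above.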
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
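    ## no public types are exported from ns3::FatalImpl by this module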
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
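    ## function-pointer typedefs for 32- and 64-bit hash routines (presumably
    ## the callback types consumed by ns3::Hasher and the Hash32/Hash64 adapters)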
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
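    ## Fnv1a and Murmur3 are concrete hash implementations; Hash32/Hash64
    ## appear to adapt bare Hash32Function_ptr/Hash64Function_ptr function
    ## pointers to the ns3::Hash::Implementation interface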
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
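## register_methods() fans out to one register_Ns3*_methods helper per wrapped
## class; the helper definitions follow this dispatch table.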
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3FlowMonitorHelper_methods(root_module, root_module['ns3::FlowMonitorHelper'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Histogram_methods(root_module, root_module['ns3::Histogram'])
register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6InterfaceAddress_methods(root_module, root_module['ns3::Ipv6InterfaceAddress'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
register_Ns3Ipv6Header_methods(root_module, root_module['ns3::Ipv6Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
register_Ns3SocketPriorityTag_methods(root_module, root_module['ns3::SocketPriorityTag'])
register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3FlowClassifier_methods(root_module, root_module['ns3::FlowClassifier'])
register_Ns3FlowMonitor_methods(root_module, root_module['ns3::FlowMonitor'])
register_Ns3FlowMonitorFlowStats_methods(root_module, root_module['ns3::FlowMonitor::FlowStats'])
register_Ns3FlowProbe_methods(root_module, root_module['ns3::FlowProbe'])
register_Ns3FlowProbeFlowStats_methods(root_module, root_module['ns3::FlowProbe::FlowStats'])
register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4FlowClassifier_methods(root_module, root_module['ns3::Ipv4FlowClassifier'])
register_Ns3Ipv4FlowClassifierFiveTuple_methods(root_module, root_module['ns3::Ipv4FlowClassifier::FiveTuple'])
register_Ns3Ipv4FlowClassifierSortByCount_methods(root_module, root_module['ns3::Ipv4FlowClassifier::SortByCount'])
register_Ns3Ipv4FlowProbe_methods(root_module, root_module['ns3::Ipv4FlowProbe'])
register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
register_Ns3Ipv6_methods(root_module, root_module['ns3::Ipv6'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6FlowClassifier_methods(root_module, root_module['ns3::Ipv6FlowClassifier'])
register_Ns3Ipv6FlowClassifierFiveTuple_methods(root_module, root_module['ns3::Ipv6FlowClassifier::FiveTuple'])
register_Ns3Ipv6FlowClassifierSortByCount_methods(root_module, root_module['ns3::Ipv6FlowClassifier::SortByCount'])
register_Ns3Ipv6FlowProbe_methods(root_module, root_module['ns3::Ipv6FlowProbe'])
register_Ns3Ipv6L3Protocol_methods(root_module, root_module['ns3::Ipv6L3Protocol'])
register_Ns3Ipv6PmtuCache_methods(root_module, root_module['ns3::Ipv6PmtuCache'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3QueueDiscItem_methods(root_module, root_module['ns3::QueueDiscItem'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
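## Per-class helpers: each ## comment records the C++ declaration a binding was
## generated from, and the pybindgen call that follows it (add_constructor,
## add_method, add_instance_attribute, ...) wraps that declaration for Python.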
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
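## Buffer::Iterator (below) exposes paired read/write helpers for both network
## byte order (ReadNtoh*/WriteHton*) and little-endian order
## (ReadLsbtoh*/WriteHtolsb*), plus raw ReadU*/WriteU* variants.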
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
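## Tag iterators use the HasNext()/Next() protocol rather than Python's native
## iterator protocol, mirroring the underlying C++ API.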
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3FlowMonitorHelper_methods(root_module, cls):
## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper::FlowMonitorHelper() [constructor]
cls.add_constructor([])
## flow-monitor-helper.h (module 'flow-monitor'): void ns3::FlowMonitorHelper::SetMonitorAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetMonitorAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::Install(ns3::NodeContainer nodes) [member function]
cls.add_method('Install',
'ns3::Ptr< ns3::FlowMonitor >',
[param('ns3::NodeContainer', 'nodes')])
## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::Install(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Install',
'ns3::Ptr< ns3::FlowMonitor >',
[param('ns3::Ptr< ns3::Node >', 'node')])
## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::InstallAll() [member function]
cls.add_method('InstallAll',
'ns3::Ptr< ns3::FlowMonitor >',
[])
## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::GetMonitor() [member function]
cls.add_method('GetMonitor',
'ns3::Ptr< ns3::FlowMonitor >',
[])
## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowClassifier> ns3::FlowMonitorHelper::GetClassifier() [member function]
cls.add_method('GetClassifier',
'ns3::Ptr< ns3::FlowClassifier >',
[])
## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowClassifier> ns3::FlowMonitorHelper::GetClassifier6() [member function]
cls.add_method('GetClassifier6',
'ns3::Ptr< ns3::FlowClassifier >',
[])
## flow-monitor-helper.h (module 'flow-monitor'): void ns3::FlowMonitorHelper::SerializeToXmlStream(std::ostream & os, uint16_t indent, bool enableHistograms, bool enableProbes) [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
## flow-monitor-helper.h (module 'flow-monitor'): std::string ns3::FlowMonitorHelper::SerializeToXmlString(uint16_t indent, bool enableHistograms, bool enableProbes) [member function]
cls.add_method('SerializeToXmlString',
'std::string',
[param('uint16_t', 'indent'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
## flow-monitor-helper.h (module 'flow-monitor'): void ns3::FlowMonitorHelper::SerializeToXmlFile(std::string fileName, bool enableHistograms, bool enableProbes) [member function]
cls.add_method('SerializeToXmlFile',
'void',
[param('std::string', 'fileName'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
return
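## Illustrative (commented-out) use of the generated FlowMonitorHelper binding --
## a sketch only; the ns.flow_monitor import path is an assumption inferred from
## the ns.core/ns.network/ns.internet module names used above:
##   import ns.flow_monitor
##   helper = ns.flow_monitor.FlowMonitorHelper()
##   monitor = helper.InstallAll()            # returns Ptr<FlowMonitor>
##   # ... configure and run the simulation ...
##   helper.SerializeToXmlFile("flowmon.xml", True, True)  # histograms, probes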
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
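## Hasher is resettable via clear(); e.g. (hypothetical, assuming ns.core is
## importable) ns.core.Hasher().GetHash32("some-id") would hash with whatever
## default implementation ns3::Hasher selects.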
def register_Ns3Histogram_methods(root_module, cls):
## histogram.h (module 'flow-monitor'): ns3::Histogram::Histogram(ns3::Histogram const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Histogram const &', 'arg0')])
## histogram.h (module 'flow-monitor'): ns3::Histogram::Histogram(double binWidth) [constructor]
cls.add_constructor([param('double', 'binWidth')])
## histogram.h (module 'flow-monitor'): ns3::Histogram::Histogram() [constructor]
cls.add_constructor([])
## histogram.h (module 'flow-monitor'): void ns3::Histogram::AddValue(double value) [member function]
cls.add_method('AddValue',
'void',
[param('double', 'value')])
## histogram.h (module 'flow-monitor'): uint32_t ns3::Histogram::GetBinCount(uint32_t index) [member function]
cls.add_method('GetBinCount',
'uint32_t',
[param('uint32_t', 'index')])
## histogram.h (module 'flow-monitor'): double ns3::Histogram::GetBinEnd(uint32_t index) [member function]
cls.add_method('GetBinEnd',
'double',
[param('uint32_t', 'index')])
## histogram.h (module 'flow-monitor'): double ns3::Histogram::GetBinStart(uint32_t index) [member function]
cls.add_method('GetBinStart',
'double',
[param('uint32_t', 'index')])
## histogram.h (module 'flow-monitor'): double ns3::Histogram::GetBinWidth(uint32_t index) const [member function]
cls.add_method('GetBinWidth',
'double',
[param('uint32_t', 'index')],
is_const=True)
## histogram.h (module 'flow-monitor'): uint32_t ns3::Histogram::GetNBins() const [member function]
cls.add_method('GetNBins',
'uint32_t',
[],
is_const=True)
## histogram.h (module 'flow-monitor'): void ns3::Histogram::SerializeToXmlStream(std::ostream & os, uint16_t indent, std::string elementName) const [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent'), param('std::string', 'elementName')],
is_const=True)
## histogram.h (module 'flow-monitor'): void ns3::Histogram::SetDefaultBinWidth(double binWidth) [member function]
cls.add_method('SetDefaultBinWidth',
'void',
[param('double', 'binWidth')])
return
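## Histogram presumably backs FlowMonitor's per-flow histogram statistics
## (delay, jitter, packet size); bins are inspected through
## GetNBins()/GetBinStart()/GetBinEnd()/GetBinCount() as registered above.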
def register_Ns3Inet6SocketAddress_methods(root_module, cls):
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor]
cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor]
cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor]
cls.add_constructor([param('uint16_t', 'port')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor]
cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6) [constructor]
cls.add_constructor([param('char const *', 'ipv6')])
## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function]
cls.add_method('ConvertFrom',
'ns3::Inet6SocketAddress',
[param('ns3::Address const &', 'addr')],
is_static=True)
## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function]
cls.add_method('GetIpv6',
'ns3::Ipv6Address',
[],
is_const=True)
## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function]
cls.add_method('GetPort',
'uint16_t',
[],
is_const=True)
## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'addr')],
is_static=True)
## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function]
cls.add_method('SetIpv6',
'void',
[param('ns3::Ipv6Address', 'ipv6')])
## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function]
cls.add_method('SetPort',
'void',
[param('uint16_t', 'port')])
return
def register_Ns3InetSocketAddress_methods(root_module, cls):
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) [copy constructor]
cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor]
cls.add_constructor([param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor]
cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor]
cls.add_constructor([param('char const *', 'ipv4')])
## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::InetSocketAddress',
[param('ns3::Address const &', 'address')],
is_static=True)
## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function]
cls.add_method('GetIpv4',
'ns3::Ipv4Address',
[],
is_const=True)
## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function]
cls.add_method('GetPort',
'uint16_t',
[],
is_const=True)
## inet-socket-address.h (module 'network'): uint8_t ns3::InetSocketAddress::GetTos() const [member function]
cls.add_method('GetTos',
'uint8_t',
[],
is_const=True)
## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ipv4Address', 'address')])
## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function]
cls.add_method('SetPort',
'void',
[param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetTos(uint8_t tos) [member function]
cls.add_method('SetTos',
'void',
[param('uint8_t', 'tos')])
return
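# Illustrative sketch of the IPv4 counterpart ('ns.network' import path
# assumed); note the TOS byte travels with the address/port pair:
def _example_inet_socket_address():
    import ns.network
    sa = ns.network.InetSocketAddress(ns.network.Ipv4Address('10.1.1.1'), 8080)
    sa.SetTos(0x10)                 # TOS byte carried alongside address and port
    assert sa.GetPort() == 8080
    return sa.GetIpv4()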
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
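# Illustrative sketch ('ns.network' path assumed): the comparison operators
# and helpers registered above make Ipv4Address usable as an ordinary value
# type from Python:
def _example_ipv4_address():
    import ns.network
    a = ns.network.Ipv4Address('192.168.1.42')
    net = a.CombineMask(ns.network.Ipv4Mask('255.255.255.0'))                 # -> 192.168.1.0
    bcast = a.GetSubnetDirectedBroadcast(ns.network.Ipv4Mask('255.255.255.0'))  # -> 192.168.1.255
    return net, bcast, a.IsMulticast()  # last element: False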
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor]
cls.add_constructor([])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor]
cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function]
cls.add_method('GetLocal',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function]
cls.add_method('GetMask',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function]
cls.add_method('GetScope',
'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function]
cls.add_method('IsSecondary',
'bool',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function]
cls.add_method('SetBroadcast',
'void',
[param('ns3::Ipv4Address', 'broadcast')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function]
cls.add_method('SetLocal',
'void',
[param('ns3::Ipv4Address', 'local')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::Ipv4Mask', 'mask')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function]
cls.add_method('SetPrimary',
'void',
[])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
cls.add_method('SetScope',
'void',
[param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function]
cls.add_method('SetSecondary',
'void',
[])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
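# Illustrative sketch ('ns.network' path assumed): a mask can be built from a
# dotted quad or raw uint32 and used to test subnet membership:
def _example_ipv4_mask():
    import ns.network
    m = ns.network.Ipv4Mask('255.255.252.0')
    assert m.GetPrefixLength() == 22
    return m.IsMatch(ns.network.Ipv4Address('10.0.0.1'),
                     ns.network.Ipv4Address('10.0.3.200'))  # True: same /22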
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
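# Illustrative sketch ('ns.network' path assumed): the MakeAutoconfigured*
# statics registered above derive EUI-64-style IPv6 addresses from MAC
# addresses:
def _example_ipv6_address():
    import ns.network
    mac = ns.network.Mac48Address('00:11:22:33:44:55')
    ll = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    assert ll.IsLinkLocal()
    return ll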
def register_Ns3Ipv6InterfaceAddress_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress() [constructor]
cls.add_constructor([])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6Address address) [constructor]
cls.add_constructor([param('ns3::Ipv6Address', 'address')])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6Address address, ns3::Ipv6Prefix prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Address', 'address'), param('ns3::Ipv6Prefix', 'prefix')])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6InterfaceAddress const & o) [copy constructor]
cls.add_constructor([param('ns3::Ipv6InterfaceAddress const &', 'o')])
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6InterfaceAddress::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-interface-address.h (module 'internet'): uint32_t ns3::Ipv6InterfaceAddress::GetNsDadUid() const [member function]
cls.add_method('GetNsDadUid',
'uint32_t',
[],
is_const=True)
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6Prefix ns3::Ipv6InterfaceAddress::GetPrefix() const [member function]
cls.add_method('GetPrefix',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e ns3::Ipv6InterfaceAddress::GetScope() const [member function]
cls.add_method('GetScope',
'ns3::Ipv6InterfaceAddress::Scope_e',
[],
is_const=True)
## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e ns3::Ipv6InterfaceAddress::GetState() const [member function]
cls.add_method('GetState',
'ns3::Ipv6InterfaceAddress::State_e',
[],
is_const=True)
## ipv6-interface-address.h (module 'internet'): bool ns3::Ipv6InterfaceAddress::IsInSameSubnet(ns3::Ipv6Address b) const [member function]
cls.add_method('IsInSameSubnet',
'bool',
[param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetAddress(ns3::Ipv6Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Ipv6Address', 'address')])
## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetNsDadUid(uint32_t uid) [member function]
cls.add_method('SetNsDadUid',
'void',
[param('uint32_t', 'uid')])
## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetScope(ns3::Ipv6InterfaceAddress::Scope_e scope) [member function]
cls.add_method('SetScope',
'void',
[param('ns3::Ipv6InterfaceAddress::Scope_e', 'scope')])
## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetState(ns3::Ipv6InterfaceAddress::State_e state) [member function]
cls.add_method('SetState',
'void',
[param('ns3::Ipv6InterfaceAddress::State_e', 'state')])
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
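# Illustrative sketch ('ns.network' path assumed): a prefix built from a bare
# length behaves like an IPv6 netmask:
def _example_ipv6_prefix():
    import ns.network
    p = ns.network.Ipv6Prefix(64)
    assert p.GetPrefixLength() == 64
    return p.IsMatch(ns.network.Ipv6Address('2001:db8::1'),
                     ns.network.Ipv6Address('2001:db8::2'))  # True: same /64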
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
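# Illustrative sketch ('ns.network' path assumed): Allocate() hands out unique
# MACs, and GetMulticast() maps an IP multicast group to its MAC-layer address:
def _example_mac48_address():
    import ns.network
    a = ns.network.Mac48Address.Allocate()
    grp = ns.network.Mac48Address.GetMulticast(ns.network.Ipv4Address('224.0.0.251'))
    return a.IsBroadcast(), grp.IsGroup()  # (False, True)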
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
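# Illustrative sketch ('ns.network' path assumed): NodeContainer is the usual
# topology-building vessel; containers concatenate via Add():
def _example_node_container():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(3)
    more = ns.network.NodeContainer()
    more.Create(2)
    nodes.Add(more)
    assert nodes.GetN() == 5
    return nodes.Get(0)  # ns3::Ptr<ns3::Node> to the first node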
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
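# Illustrative sketch ('ns.core' path assumed; the TypeId string and attribute
# name/value below are illustrative assumptions, not taken from this file):
# ObjectFactory pairs a TypeId with attribute overrides, then stamps out
# configured objects.
def _example_object_factory():
    import ns.core
    f = ns.core.ObjectFactory()
    f.SetTypeId('ns3::DropTailQueue')             # type name is an assumption
    f.Set('MaxSize', ns.core.StringValue('50p'))  # attribute name/value assumed
    return f.Create()                             # -> ns3::Ptr<ns3::Object>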
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
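# Illustrative sketch ('ns.network' path assumed): metadata tracking is off by
# default for speed; the statics registered above switch it on globally:
def _example_packet_metadata():
    import ns.network
    ns.network.PacketMetadata.Enable()          # record per-packet header/trailer history
    ns.network.PacketMetadata.EnableChecking()  # additionally verify metadata consistency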
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_static=True)
return
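# Illustrative sketch ('ns.core' path assumed): the static Simulator surface
# registered above drives the event loop's lifecycle:
def _example_simulator():
    import ns.core
    ns.core.Simulator.Stop(ns.core.Seconds(10.0))  # schedule the stop event
    t = ns.core.Simulator.Now()                    # 0ns before the simulation runs
    ns.core.Simulator.Destroy()                    # release simulator resources
    return t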
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
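# Editorial note: the TagBuffer Read*/Write* pairs registered above are used
# symmetrically: a Tag's Serialize(TagBuffer) must write exactly the
# GetSerializedSize() bytes that its Deserialize(TagBuffer) later reads back in
# the same order (e.g. WriteU32 then WriteDouble implies ReadU32 then ReadDouble).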
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
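# Editorial note: TimeWithUnit is a formatting helper; in C++ it is returned by
# ns3::Time::As(unit) so the output stream operator registered above can print a
# Time in a caller-chosen unit, and that is also its intended use from Python.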
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
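# Editorial note: a hedged sketch of the TypeId introspection API registered
# above (commented out; the `ns.core` module path is an assumption about the
# built bindings):
#
#   from ns.core import TypeId
#   tid = TypeId.LookupByName("ns3::Ipv4L3Protocol")
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)   # a TypeId::AttributeInformation struct
#       print(info.name, info.help)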
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
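# Editorial note: int64x64_t is ns-3's Q64.64 fixed-point type (used internally
# by Time), here backed by the long-double implementation from
# int64x64-double.h. The operator registrations above give it full arithmetic
# and comparison support in Python, e.g. int64x64_t(1, 0) + int64x64_t(0.5)
# represents 1.5, recoverable via GetDouble().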
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
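# Editorial note: ns3::Header specializes ns3::Chunk with Serialize and
# GetSerializedSize and is likewise registered as an abstract base; the concrete
# protocol headers that follow (Ipv4Header, Ipv6Header) override these methods,
# which is why their registrations use is_virtual=True without is_pure_virtual.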
def register_Ns3Ipv4Header_methods(root_module, cls):
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor]
cls.add_constructor([])
## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function]
cls.add_method('DscpTypeToString',
'std::string',
[param('ns3::Ipv4Header::DscpType', 'dscp')],
is_const=True)
## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function]
cls.add_method('EcnTypeToString',
'std::string',
[param('ns3::Ipv4Header::EcnType', 'ecn')],
is_const=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function]
cls.add_method('EnableChecksum',
'void',
[])
## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function]
cls.add_method('GetDestination',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function]
cls.add_method('GetDscp',
'ns3::Ipv4Header::DscpType',
[],
is_const=True)
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function]
cls.add_method('GetEcn',
'ns3::Ipv4Header::EcnType',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function]
cls.add_method('GetFragmentOffset',
'uint16_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function]
cls.add_method('GetIdentification',
'uint16_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function]
cls.add_method('GetPayloadSize',
'uint16_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function]
cls.add_method('GetProtocol',
'uint8_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function]
cls.add_method('GetSource',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function]
cls.add_method('GetTos',
'uint8_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function]
cls.add_method('GetTtl',
'uint8_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function]
cls.add_method('IsChecksumOk',
'bool',
[],
is_const=True)
## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function]
cls.add_method('IsDontFragment',
'bool',
[],
is_const=True)
## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function]
cls.add_method('IsLastFragment',
'bool',
[],
is_const=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function]
cls.add_method('SetDestination',
'void',
[param('ns3::Ipv4Address', 'destination')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function]
cls.add_method('SetDontFragment',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function]
cls.add_method('SetDscp',
'void',
[param('ns3::Ipv4Header::DscpType', 'dscp')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function]
cls.add_method('SetEcn',
'void',
[param('ns3::Ipv4Header::EcnType', 'ecn')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function]
cls.add_method('SetFragmentOffset',
'void',
[param('uint16_t', 'offsetBytes')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function]
cls.add_method('SetIdentification',
'void',
[param('uint16_t', 'identification')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function]
cls.add_method('SetLastFragment',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function]
cls.add_method('SetMayFragment',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function]
cls.add_method('SetMoreFragments',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function]
cls.add_method('SetPayloadSize',
'void',
[param('uint16_t', 'size')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function]
cls.add_method('SetProtocol',
'void',
[param('uint8_t', 'num')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function]
cls.add_method('SetSource',
'void',
[param('ns3::Ipv4Address', 'source')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function]
cls.add_method('SetTos',
'void',
[param('uint8_t', 'tos')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function]
cls.add_method('SetTtl',
'void',
[param('uint8_t', 'ttl')])
return
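# Editorial note: a minimal sketch of the Ipv4Header setters registered above
# (commented out; the `ns.internet` / `ns.network` module paths are assumptions
# about the built bindings):
#
#   from ns.internet import Ipv4Header
#   from ns.network import Ipv4Address
#   h = Ipv4Header()
#   h.SetSource(Ipv4Address("10.1.1.1"))
#   h.SetDestination(Ipv4Address("10.1.1.2"))
#   h.SetProtocol(17)       # UDP
#   h.SetTtl(64)
#   h.SetPayloadSize(512)   # payload bytes, excluding the 20-byte IPv4 header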
def register_Ns3Ipv6Header_methods(root_module, cls):
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::Ipv6Header(ns3::Ipv6Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Header const &', 'arg0')])
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::Ipv6Header() [constructor]
cls.add_constructor([])
## ipv6-header.h (module 'internet'): uint32_t ns3::Ipv6Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## ipv6-header.h (module 'internet'): std::string ns3::Ipv6Header::DscpTypeToString(ns3::Ipv6Header::DscpType dscp) const [member function]
cls.add_method('DscpTypeToString',
'std::string',
[param('ns3::Ipv6Header::DscpType', 'dscp')],
is_const=True)
## ipv6-header.h (module 'internet'): std::string ns3::Ipv6Header::EcnTypeToString(ns3::Ipv6Header::EcnType ecn) const [member function]
cls.add_method('EcnTypeToString',
'std::string',
[param('ns3::Ipv6Header::EcnType', 'ecn')],
is_const=True)
## ipv6-header.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6Header::GetDestinationAddress() const [member function]
cls.add_method('GetDestinationAddress',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::DscpType ns3::Ipv6Header::GetDscp() const [member function]
cls.add_method('GetDscp',
'ns3::Ipv6Header::DscpType',
[],
is_const=True)
## ipv6-header.h (module 'internet'): ns3::Ipv6Header::EcnType ns3::Ipv6Header::GetEcn() const [member function]
cls.add_method('GetEcn',
'ns3::Ipv6Header::EcnType',
[],
is_const=True)
## ipv6-header.h (module 'internet'): uint32_t ns3::Ipv6Header::GetFlowLabel() const [member function]
cls.add_method('GetFlowLabel',
'uint32_t',
[],
is_const=True)
## ipv6-header.h (module 'internet'): uint8_t ns3::Ipv6Header::GetHopLimit() const [member function]
cls.add_method('GetHopLimit',
'uint8_t',
[],
is_const=True)
## ipv6-header.h (module 'internet'): ns3::TypeId ns3::Ipv6Header::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## ipv6-header.h (module 'internet'): uint8_t ns3::Ipv6Header::GetNextHeader() const [member function]
cls.add_method('GetNextHeader',
'uint8_t',
[],
is_const=True)
## ipv6-header.h (module 'internet'): uint16_t ns3::Ipv6Header::GetPayloadLength() const [member function]
cls.add_method('GetPayloadLength',
'uint16_t',
[],
is_const=True)
## ipv6-header.h (module 'internet'): uint32_t ns3::Ipv6Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ipv6-header.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6Header::GetSourceAddress() const [member function]
cls.add_method('GetSourceAddress',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-header.h (module 'internet'): uint8_t ns3::Ipv6Header::GetTrafficClass() const [member function]
cls.add_method('GetTrafficClass',
'uint8_t',
[],
is_const=True)
## ipv6-header.h (module 'internet'): static ns3::TypeId ns3::Ipv6Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetDestinationAddress(ns3::Ipv6Address dst) [member function]
cls.add_method('SetDestinationAddress',
'void',
[param('ns3::Ipv6Address', 'dst')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetDscp(ns3::Ipv6Header::DscpType dscp) [member function]
cls.add_method('SetDscp',
'void',
[param('ns3::Ipv6Header::DscpType', 'dscp')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetEcn(ns3::Ipv6Header::EcnType ecn) [member function]
cls.add_method('SetEcn',
'void',
[param('ns3::Ipv6Header::EcnType', 'ecn')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetFlowLabel(uint32_t flow) [member function]
cls.add_method('SetFlowLabel',
'void',
[param('uint32_t', 'flow')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetHopLimit(uint8_t limit) [member function]
cls.add_method('SetHopLimit',
'void',
[param('uint8_t', 'limit')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetNextHeader(uint8_t next) [member function]
cls.add_method('SetNextHeader',
'void',
[param('uint8_t', 'next')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetPayloadLength(uint16_t len) [member function]
cls.add_method('SetPayloadLength',
'void',
[param('uint16_t', 'len')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetSourceAddress(ns3::Ipv6Address src) [member function]
cls.add_method('SetSourceAddress',
'void',
[param('ns3::Ipv6Address', 'src')])
## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetTrafficClass(uint8_t traffic) [member function]
cls.add_method('SetTrafficClass',
'void',
[param('uint8_t', 'traffic')])
return
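# Editorial note: the Ipv6Header registration parallels Ipv4Header, with the
# IPv6 field names (flow label, hop limit, traffic class, next header) taking
# the place of their IPv4 counterparts (identification, TTL, TOS, protocol).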
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
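# Editorial note: AggregateObject/GetAggregateIterator on Object, together with
# the HasNext/Next iterator registered above, expose ns-3's object aggregation.
# A hedged sketch (commented out; the module path is an assumption about the
# built bindings):
#
#   node = ns.network.Node()          # Node derives from ns3::Object
#   it = node.GetAggregateIterator()
#   while it.HasNext():
#       obj = it.Next()               # Ptr<const Object>
#       print(obj.GetInstanceTypeId().GetName())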
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter< ns3::FlowClassifier > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
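# Editorial note: the register_Ns3SimpleRefCount__* functions above are all
# instantiations of the single ns3::SimpleRefCount<T, PARENT, DELETER> template;
# each exposes only the default/copy constructors plus the static Cleanup() hook,
# which is why their bodies are identical apart from the template arguments.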
def register_Ns3Socket_methods(root_module, cls):
## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Socket const &', 'arg0')])
## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function]
cls.add_method('Bind',
'int',
[param('ns3::Address const &', 'address')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Bind() [member function]
cls.add_method('Bind',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Bind6() [member function]
cls.add_method('Bind6',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function]
cls.add_method('BindToNetDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'netdevice')],
is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Close() [member function]
cls.add_method('Close',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function]
cls.add_method('Connect',
'int',
[param('ns3::Address const &', 'address')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function]
cls.add_method('CreateSocket',
'ns3::Ptr< ns3::Socket >',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')],
is_static=True)
## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function]
cls.add_method('GetAllowBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function]
cls.add_method('GetBoundNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[])
## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function]
cls.add_method('GetErrno',
'ns3::Socket::SocketErrno',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTos() const [member function]
cls.add_method('GetIpTos',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTtl() const [member function]
cls.add_method('GetIpTtl',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function]
cls.add_method('GetIpv6HopLimit',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function]
cls.add_method('GetIpv6Tclass',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::GetPeerName(ns3::Address & address) const [member function]
cls.add_method('GetPeerName',
'int',
[param('ns3::Address &', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetPriority() const [member function]
cls.add_method('GetPriority',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
cls.add_method('GetRxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
cls.add_method('GetSockName',
'int',
[param('ns3::Address &', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
cls.add_method('GetSocketType',
'ns3::Socket::SocketType',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
cls.add_method('GetTxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): static uint8_t ns3::Socket::IpTos2Priority(uint8_t ipTos) [member function]
cls.add_method('IpTos2Priority',
'uint8_t',
[param('uint8_t', 'ipTos')],
is_static=True)
## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address, ns3::Socket::Ipv6MulticastFilterMode filterMode, std::vector<ns3::Ipv6Address,std::allocator<ns3::Ipv6Address> > sourceAddresses) [member function]
cls.add_method('Ipv6JoinGroup',
'void',
[param('ns3::Ipv6Address', 'address'), param('ns3::Socket::Ipv6MulticastFilterMode', 'filterMode'), param('std::vector< ns3::Ipv6Address >', 'sourceAddresses')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address) [member function]
cls.add_method('Ipv6JoinGroup',
'void',
[param('ns3::Ipv6Address', 'address')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::Ipv6LeaveGroup() [member function]
cls.add_method('Ipv6LeaveGroup',
'void',
[],
is_virtual=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function]
cls.add_method('IsIpRecvTos',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function]
cls.add_method('IsIpRecvTtl',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function]
cls.add_method('IsIpv6RecvHopLimit',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function]
cls.add_method('IsIpv6RecvTclass',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function]
cls.add_method('IsRecvPktInfo',
'bool',
[],
is_const=True)
## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
cls.add_method('Listen',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[])
## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Recv',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
cls.add_method('SendTo',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
cls.add_method('SendTo',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
cls.add_method('SetAcceptCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
cls.add_method('SetAllowBroadcast',
'bool',
[param('bool', 'allowBroadcast')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
cls.add_method('SetCloseCallbacks',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
cls.add_method('SetConnectCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
cls.add_method('SetDataSentCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function]
cls.add_method('SetIpRecvTos',
'void',
[param('bool', 'ipv4RecvTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function]
cls.add_method('SetIpRecvTtl',
'void',
[param('bool', 'ipv4RecvTtl')])
## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function]
cls.add_method('SetIpTos',
'void',
[param('uint8_t', 'ipTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function]
cls.add_method('SetIpTtl',
'void',
[param('uint8_t', 'ipTtl')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function]
cls.add_method('SetIpv6HopLimit',
'void',
[param('uint8_t', 'ipHopLimit')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function]
cls.add_method('SetIpv6RecvHopLimit',
'void',
[param('bool', 'ipv6RecvHopLimit')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function]
cls.add_method('SetIpv6RecvTclass',
'void',
[param('bool', 'ipv6RecvTclass')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function]
cls.add_method('SetIpv6Tclass',
'void',
[param('int', 'ipTclass')])
## socket.h (module 'network'): void ns3::Socket::SetPriority(uint8_t priority) [member function]
cls.add_method('SetPriority',
'void',
[param('uint8_t', 'priority')])
## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
cls.add_method('SetRecvCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function]
cls.add_method('SetRecvPktInfo',
'void',
[param('bool', 'flag')])
## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function]
cls.add_method('ShutdownRecv',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function]
cls.add_method('ShutdownSend',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function]
cls.add_method('IsManualIpTtl',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function]
cls.add_method('IsManualIpv6HopLimit',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function]
cls.add_method('IsManualIpv6Tclass',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function]
cls.add_method('NotifyConnectionFailed',
'void',
[],
visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function]
cls.add_method('NotifyConnectionRequest',
'bool',
[param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function]
cls.add_method('NotifyConnectionSucceeded',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function]
cls.add_method('NotifyDataRecv',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function]
cls.add_method('NotifyDataSent',
'void',
[param('uint32_t', 'size')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function]
cls.add_method('NotifyErrorClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function]
cls.add_method('NotifyNewConnectionCreated',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function]
cls.add_method('NotifyNormalClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function]
cls.add_method('NotifySend',
'void',
[param('uint32_t', 'spaceAvailable')],
visibility='protected')
return
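# Illustrative usage sketch (not generated output). With the Socket methods
# registered above, a script typically obtains a concrete socket through the
# factory pattern; Socket itself is abstract (note the is_pure_virtual flags).
# The names ns.core, ns.network and Socket.CreateSocket are assumptions about
# the standard ns-3 python packaging, not definitions made in this file:
#
#   import ns.core
#   import ns.network
#
#   def make_udp_socket(node):
#       tid = ns.core.TypeId.LookupByName("ns3::UdpSocketFactory")
#       sock = ns.network.Socket.CreateSocket(node, tid)  # assumed registered earlier in this file
#       sock.Bind()
#       # plain Python callables are wrapped into ns3::Callback by the bindings
#       sock.SetRecvCallback(lambda s: s.Recv())
#       return sock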
def register_Ns3SocketIpTosTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag(ns3::SocketIpTosTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketIpTosTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpTosTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTosTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpTosTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpTosTag::GetTos() const [member function]
cls.add_method('GetTos',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTosTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpTosTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTosTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTosTag::SetTos(uint8_t tos) [member function]
cls.add_method('SetTos',
'void',
[param('uint8_t', 'tos')])
return
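# Sketch (same packaging assumptions as the Socket sketch above):
# SocketIpTosTag and the sibling tag classes registered below all follow the
# same ns3::Tag shape -- a value accessor pair plus
# Serialize/Deserialize/GetSerializedSize/Print:
#
#   tag = ns.network.SocketIpTosTag()
#   tag.SetTos(0x10)                 # e.g. a low-delay TOS value
#   assert tag.GetTos() == 0x10
#   # pkt.AddPacketTag(tag) would attach it to an ns3::Packet before Send()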
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function]
cls.add_method('GetTtl',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function]
cls.add_method('SetTtl',
'void',
[param('uint8_t', 'ttl')])
return
def register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag(ns3::SocketIpv6HopLimitTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketIpv6HopLimitTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpv6HopLimitTag::GetHopLimit() const [member function]
cls.add_method('GetHopLimit',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6HopLimitTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpv6HopLimitTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6HopLimitTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::SetHopLimit(uint8_t hopLimit) [member function]
cls.add_method('SetHopLimit',
'void',
[param('uint8_t', 'hopLimit')])
return
def register_Ns3SocketIpv6TclassTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag(ns3::SocketIpv6TclassTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketIpv6TclassTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6TclassTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpv6TclassTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpv6TclassTag::GetTclass() const [member function]
cls.add_method('GetTclass',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6TclassTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::SetTclass(uint8_t tclass) [member function]
cls.add_method('SetTclass',
'void',
[param('uint8_t', 'tclass')])
return
def register_Ns3SocketPriorityTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketPriorityTag::SocketPriorityTag(ns3::SocketPriorityTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketPriorityTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketPriorityTag::SocketPriorityTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketPriorityTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketPriorityTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketPriorityTag::GetPriority() const [member function]
cls.add_method('GetPriority',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint32_t ns3::SocketPriorityTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketPriorityTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketPriorityTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketPriorityTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketPriorityTag::SetPriority(uint8_t priority) [member function]
cls.add_method('SetPriority',
'void',
[param('uint8_t', 'priority')])
return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function]
cls.add_method('Disable',
'void',
[])
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function]
cls.add_method('Enable',
'void',
[])
## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function]
cls.add_method('IsEnabled',
'bool',
[],
is_const=True)
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
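# Sketch (illustrative; ns.core and the Seconds/MilliSeconds helpers are
# assumed from the standard ns-3 python build): the operator registrations at
# the top of register_Ns3Time_methods make ns3::Time behave like a numeric
# type from Python:
#
#   import ns.core
#   t = ns.core.Seconds(1.5)
#   u = t + ns.core.MilliSeconds(250)      # '+' operator registered above
#   assert u.GetMilliSeconds() == 1750     # GetMilliSeconds registered above
#   assert t < u and not t.IsZero()        # '<' and IsZero registered above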
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
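# Sketch (illustrative): AttributeAccessor, AttributeChecker and
# AttributeValue form the triple behind ns3::TypeId attributes. From Python
# they are normally reached indirectly, e.g. through Config (the module name
# and the "ns3::OnOffApplication::PacketSize" attribute path are assumptions
# for illustration):
#
#   import ns.core
#   ns.core.Config.SetDefault("ns3::OnOffApplication::PacketSize",
#                             ns.core.UintegerValue(512))
#   # string forms go through AttributeValue::DeserializeFromString and the
#   # matching AttributeChecker::Check registered above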
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
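# Sketch (illustrative): EventImpl is the internal representation behind
# scheduled events; user scripts go through the Simulator facade instead
# (ns.core.Simulator and scheduling a plain Python callable are assumptions
# about the standard ns-3 python build):
#
#   def on_timeout():
#       print("fired")
#   event_id = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), on_timeout)
#   event_id.Cancel()   # forwards to the EventImpl::Cancel registered above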
def register_Ns3FlowClassifier_methods(root_module, cls):
## flow-classifier.h (module 'flow-monitor'): ns3::FlowClassifier::FlowClassifier() [constructor]
cls.add_constructor([])
## flow-classifier.h (module 'flow-monitor'): void ns3::FlowClassifier::SerializeToXmlStream(std::ostream & os, uint16_t indent) const [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## flow-classifier.h (module 'flow-monitor'): ns3::FlowId ns3::FlowClassifier::GetNewFlowId() [member function]
cls.add_method('GetNewFlowId',
'ns3::FlowId',
[],
visibility='protected')
## flow-classifier.h (module 'flow-monitor'): void ns3::FlowClassifier::Indent(std::ostream & os, uint16_t level) const [member function]
cls.add_method('Indent',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'level')],
is_const=True, visibility='protected')
return
def register_Ns3FlowMonitor_methods(root_module, cls):
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowMonitor(ns3::FlowMonitor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FlowMonitor const &', 'arg0')])
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowMonitor() [constructor]
cls.add_constructor([])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::AddFlowClassifier(ns3::Ptr<ns3::FlowClassifier> classifier) [member function]
cls.add_method('AddFlowClassifier',
'void',
[param('ns3::Ptr< ns3::FlowClassifier >', 'classifier')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::AddProbe(ns3::Ptr<ns3::FlowProbe> probe) [member function]
cls.add_method('AddProbe',
'void',
[param('ns3::Ptr< ns3::FlowProbe >', 'probe')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::CheckForLostPackets() [member function]
cls.add_method('CheckForLostPackets',
'void',
[])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::CheckForLostPackets(ns3::Time maxDelay) [member function]
cls.add_method('CheckForLostPackets',
'void',
[param('ns3::Time', 'maxDelay')])
## flow-monitor.h (module 'flow-monitor'): std::vector<ns3::Ptr<ns3::FlowProbe>, std::allocator<ns3::Ptr<ns3::FlowProbe> > > const & ns3::FlowMonitor::GetAllProbes() const [member function]
cls.add_method('GetAllProbes',
'std::vector< ns3::Ptr< ns3::FlowProbe > > const &',
[],
is_const=True)
## flow-monitor.h (module 'flow-monitor'): std::map<unsigned int, ns3::FlowMonitor::FlowStats, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, ns3::FlowMonitor::FlowStats> > > const & ns3::FlowMonitor::GetFlowStats() const [member function]
cls.add_method('GetFlowStats',
'std::map< unsigned int, ns3::FlowMonitor::FlowStats > const &',
[],
is_const=True)
## flow-monitor.h (module 'flow-monitor'): ns3::TypeId ns3::FlowMonitor::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## flow-monitor.h (module 'flow-monitor'): static ns3::TypeId ns3::FlowMonitor::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportDrop(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize, uint32_t reasonCode) [member function]
cls.add_method('ReportDrop',
'void',
[param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize'), param('uint32_t', 'reasonCode')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportFirstTx(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize) [member function]
cls.add_method('ReportFirstTx',
'void',
[param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportForwarding(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize) [member function]
cls.add_method('ReportForwarding',
'void',
[param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportLastRx(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize) [member function]
cls.add_method('ReportLastRx',
'void',
[param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::SerializeToXmlFile(std::string fileName, bool enableHistograms, bool enableProbes) [member function]
cls.add_method('SerializeToXmlFile',
'void',
[param('std::string', 'fileName'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::SerializeToXmlStream(std::ostream & os, uint16_t indent, bool enableHistograms, bool enableProbes) [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
## flow-monitor.h (module 'flow-monitor'): std::string ns3::FlowMonitor::SerializeToXmlString(uint16_t indent, bool enableHistograms, bool enableProbes) [member function]
cls.add_method('SerializeToXmlString',
'std::string',
[param('uint16_t', 'indent'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::Start(ns3::Time const & time) [member function]
cls.add_method('Start',
'void',
[param('ns3::Time const &', 'time')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::StartRightNow() [member function]
cls.add_method('StartRightNow',
'void',
[])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::StopRightNow() [member function]
cls.add_method('StopRightNow',
'void',
[])
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
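# Sketch (illustrative): FlowMonitor is usually installed through
# FlowMonitorHelper, which is defined elsewhere in the flow-monitor module;
# the helper name and ns.flow_monitor are assumptions about the standard
# packaging:
#
#   import ns.flow_monitor
#   helper = ns.flow_monitor.FlowMonitorHelper()
#   monitor = helper.InstallAll()
#   # ... configure and run the simulation ...
#   monitor.CheckForLostPackets()                          # registered above
#   monitor.SerializeToXmlFile("flowmon.xml", True, True)  # registered above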
def register_Ns3FlowMonitorFlowStats_methods(root_module, cls):
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::FlowStats() [constructor]
cls.add_constructor([])
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::FlowStats(ns3::FlowMonitor::FlowStats const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FlowMonitor::FlowStats const &', 'arg0')])
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::bytesDropped [variable]
cls.add_instance_attribute('bytesDropped', 'std::vector< unsigned long long >', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::delayHistogram [variable]
cls.add_instance_attribute('delayHistogram', 'ns3::Histogram', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::delaySum [variable]
cls.add_instance_attribute('delaySum', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::flowInterruptionsHistogram [variable]
cls.add_instance_attribute('flowInterruptionsHistogram', 'ns3::Histogram', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::jitterHistogram [variable]
cls.add_instance_attribute('jitterHistogram', 'ns3::Histogram', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::jitterSum [variable]
cls.add_instance_attribute('jitterSum', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::lastDelay [variable]
cls.add_instance_attribute('lastDelay', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::lostPackets [variable]
cls.add_instance_attribute('lostPackets', 'uint32_t', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::packetSizeHistogram [variable]
cls.add_instance_attribute('packetSizeHistogram', 'ns3::Histogram', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::packetsDropped [variable]
cls.add_instance_attribute('packetsDropped', 'std::vector< unsigned int >', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::rxBytes [variable]
cls.add_instance_attribute('rxBytes', 'uint64_t', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::rxPackets [variable]
cls.add_instance_attribute('rxPackets', 'uint32_t', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeFirstRxPacket [variable]
cls.add_instance_attribute('timeFirstRxPacket', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeFirstTxPacket [variable]
cls.add_instance_attribute('timeFirstTxPacket', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeLastRxPacket [variable]
cls.add_instance_attribute('timeLastRxPacket', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeLastTxPacket [variable]
cls.add_instance_attribute('timeLastTxPacket', 'ns3::Time', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timesForwarded [variable]
cls.add_instance_attribute('timesForwarded', 'uint32_t', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::txBytes [variable]
cls.add_instance_attribute('txBytes', 'uint64_t', is_const=False)
## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::txPackets [variable]
cls.add_instance_attribute('txPackets', 'uint32_t', is_const=False)
return
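# ---------------------------------------------------------------------------
# Illustrative sketch (hand-written, not generator output): how the
# FlowMonitor::FlowStats fields registered above are typically consumed from
# Python once the generated 'ns.flow_monitor' extension module is built.
# Follows the pattern of the stock flow-monitor examples; this helper is
# never called by the generator and only runs if invoked explicitly.
def _example_print_flow_stats(monitor):
    # 'monitor' is assumed to be an ns3::FlowMonitor instance, e.g. the
    # result of FlowMonitorHelper.InstallAll(); GetFlowStats() iterates as
    # (FlowId, FlowStats) pairs.
    monitor.CheckForLostPackets()
    for flow_id, stats in monitor.GetFlowStats():
        print("flow %d: tx=%d rx=%d lost=%d delaySum=%fs" % (
            flow_id, stats.txPackets, stats.rxPackets, stats.lostPackets,
            stats.delaySum.GetSeconds()))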
def register_Ns3FlowProbe_methods(root_module, cls):
## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::AddPacketDropStats(ns3::FlowId flowId, uint32_t packetSize, uint32_t reasonCode) [member function]
cls.add_method('AddPacketDropStats',
'void',
[param('ns3::FlowId', 'flowId'), param('uint32_t', 'packetSize'), param('uint32_t', 'reasonCode')])
## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::AddPacketStats(ns3::FlowId flowId, uint32_t packetSize, ns3::Time delayFromFirstProbe) [member function]
cls.add_method('AddPacketStats',
'void',
[param('ns3::FlowId', 'flowId'), param('uint32_t', 'packetSize'), param('ns3::Time', 'delayFromFirstProbe')])
## flow-probe.h (module 'flow-monitor'): std::map<unsigned int, ns3::FlowProbe::FlowStats, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, ns3::FlowProbe::FlowStats> > > ns3::FlowProbe::GetStats() const [member function]
cls.add_method('GetStats',
'std::map< unsigned int, ns3::FlowProbe::FlowStats >',
[],
is_const=True)
## flow-probe.h (module 'flow-monitor'): static ns3::TypeId ns3::FlowProbe::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::SerializeToXmlStream(std::ostream & os, uint16_t indent, uint32_t index) const [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent'), param('uint32_t', 'index')],
is_const=True)
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowProbe(ns3::Ptr<ns3::FlowMonitor> flowMonitor) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::FlowMonitor >', 'flowMonitor')],
visibility='protected')
## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
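# Illustrative sketch (hand-written): per-probe statistics like those bound
# above are usually not read via FlowProbe.GetStats() directly; the common
# route is FlowMonitor's XML serializer, whose third argument enables the
# per-probe section. 'monitor' is assumed to be an ns3::FlowMonitor.
def _example_serialize_probe_stats(monitor):
    # SerializeToXmlFile(fileName, enableHistograms, enableProbes)
    monitor.SerializeToXmlFile("flowmon-results.xml", True, True)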
def register_Ns3FlowProbeFlowStats_methods(root_module, cls):
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::FlowStats(ns3::FlowProbe::FlowStats const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FlowProbe::FlowStats const &', 'arg0')])
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::FlowStats() [constructor]
cls.add_constructor([])
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::bytes [variable]
cls.add_instance_attribute('bytes', 'uint64_t', is_const=False)
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::bytesDropped [variable]
cls.add_instance_attribute('bytesDropped', 'std::vector< unsigned long long >', is_const=False)
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::delayFromFirstProbeSum [variable]
cls.add_instance_attribute('delayFromFirstProbeSum', 'ns3::Time', is_const=False)
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::packets [variable]
cls.add_instance_attribute('packets', 'uint32_t', is_const=False)
## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::packetsDropped [variable]
cls.add_instance_attribute('packetsDropped', 'std::vector< unsigned int >', is_const=False)
return
def register_Ns3Ipv4_methods(root_module, cls):
## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor]
cls.add_constructor([])
## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('AddAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddInterface',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4::CreateRawSocket() [member function]
cls.add_method('CreateRawSocket',
'ns3::Ptr< ns3::Socket >',
[],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
cls.add_method('DeleteRawSocket',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function]
cls.add_method('GetAddress',
'ns3::Ipv4InterfaceAddress',
[param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function]
cls.add_method('GetInterfaceForAddress',
'int32_t',
[param('ns3::Ipv4Address', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
cls.add_method('GetInterfaceForDevice',
'int32_t',
[param('ns3::Ptr< ns3::NetDevice const >', 'device')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function]
cls.add_method('GetInterfaceForPrefix',
'int32_t',
[param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function]
cls.add_method('GetMetric',
'uint16_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function]
cls.add_method('GetMtu',
'uint16_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function]
cls.add_method('GetNAddresses',
'uint32_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function]
cls.add_method('GetNInterfaces',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function]
cls.add_method('GetNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member function]
cls.add_method('GetRoutingProtocol',
'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
cls.add_method('IsDestinationAddress',
'bool',
[param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function]
cls.add_method('IsForwarding',
'bool',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function]
cls.add_method('IsUp',
'bool',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
cls.add_method('SelectSourceAddress',
'ns3::Ipv4Address',
[param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
cls.add_method('Send',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
cls.add_method('SendWithHeader',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function]
cls.add_method('SetDown',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function]
cls.add_method('SetForwarding',
'void',
[param('uint32_t', 'interface'), param('bool', 'val')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function]
cls.add_method('SetMetric',
'void',
[param('uint32_t', 'interface'), param('uint16_t', 'metric')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function]
cls.add_method('SetRoutingProtocol',
'void',
[param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function]
cls.add_method('SetUp',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SourceAddressSelection(uint32_t interface, ns3::Ipv4Address dest) [member function]
cls.add_method('SourceAddressSelection',
'ns3::Ipv4Address',
[param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'dest')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable]
cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member function]
cls.add_method('GetIpForward',
'bool',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function]
cls.add_method('GetWeakEsModel',
'bool',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function]
cls.add_method('SetIpForward',
'void',
[param('bool', 'forward')],
is_pure_virtual=True, visibility='private', is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function]
cls.add_method('SetWeakEsModel',
'void',
[param('bool', 'model')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
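# Illustrative sketch (hand-written): the abstract ns3::Ipv4 API registered
# above is normally reached through object aggregation. The TypeId-based
# GetObject() lookup below follows the ns-3 Python examples; treat it as an
# assumption if your build exposes a different lookup.
def _example_list_ipv4_addresses(node):
    import ns.internet  # deferred import; this module is run by the generator
    ipv4 = node.GetObject(ns.internet.Ipv4.GetTypeId())
    for interface in range(ipv4.GetNInterfaces()):
        for index in range(ipv4.GetNAddresses(interface)):
            print("if %d: %s" % (
                interface, ipv4.GetAddress(interface, index).GetLocal()))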
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
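# Illustrative sketch (hand-written): Ipv4AddressValue is the attribute-system
# wrapper for Ipv4Address. The attribute name below is a placeholder; use
# whatever Ipv4Address-typed attribute your target object actually declares.
def _example_set_address_attribute(obj):
    import ns.network
    obj.SetAttribute("Local",  # hypothetical attribute name
                     ns.network.Ipv4AddressValue(
                         ns.network.Ipv4Address("10.0.0.1")))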
def register_Ns3Ipv4FlowClassifier_methods(root_module, cls):
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::Ipv4FlowClassifier() [constructor]
cls.add_constructor([])
## ipv4-flow-classifier.h (module 'flow-monitor'): bool ns3::Ipv4FlowClassifier::Classify(ns3::Ipv4Header const & ipHeader, ns3::Ptr<const ns3::Packet> ipPayload, uint32_t * out_flowId, uint32_t * out_packetId) [member function]
cls.add_method('Classify',
'bool',
[param('ns3::Ipv4Header const &', 'ipHeader'), param('ns3::Ptr< ns3::Packet const >', 'ipPayload'), param('uint32_t *', 'out_flowId'), param('uint32_t *', 'out_packetId')])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple ns3::Ipv4FlowClassifier::FindFlow(ns3::FlowId flowId) const [member function]
cls.add_method('FindFlow',
'ns3::Ipv4FlowClassifier::FiveTuple',
[param('ns3::FlowId', 'flowId')],
is_const=True)
## ipv4-flow-classifier.h (module 'flow-monitor'): std::vector<std::pair<ns3::Ipv4Header::DscpType, unsigned int>,std::allocator<std::pair<ns3::Ipv4Header::DscpType, unsigned int> > > ns3::Ipv4FlowClassifier::GetDscpCounts(ns3::FlowId flowId) const [member function]
cls.add_method('GetDscpCounts',
'std::vector< std::pair< ns3::Ipv4Header::DscpType, unsigned int > >',
[param('ns3::FlowId', 'flowId')],
is_const=True)
## ipv4-flow-classifier.h (module 'flow-monitor'): void ns3::Ipv4FlowClassifier::SerializeToXmlStream(std::ostream & os, uint16_t indent) const [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent')],
is_const=True, is_virtual=True)
return
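# Illustrative sketch (hand-written): mapping a FlowId back to its five-tuple
# with the classifier bound above, as in the stock flow-monitor examples.
# 'flowmon_helper' is assumed to be the ns3::FlowMonitorHelper that installed
# 'monitor'.
def _example_describe_flows(flowmon_helper, monitor):
    classifier = flowmon_helper.GetClassifier()
    for flow_id, stats in monitor.GetFlowStats():
        t = classifier.FindFlow(flow_id)
        print("flow %d: %s:%d -> %s:%d proto=%d" % (
            flow_id, t.sourceAddress, t.sourcePort,
            t.destinationAddress, t.destinationPort, t.protocol))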
def register_Ns3Ipv4FlowClassifierFiveTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::FiveTuple() [constructor]
cls.add_constructor([])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::FiveTuple(ns3::Ipv4FlowClassifier::FiveTuple const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4FlowClassifier::FiveTuple const &', 'arg0')])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::destinationAddress [variable]
cls.add_instance_attribute('destinationAddress', 'ns3::Ipv4Address', is_const=False)
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::destinationPort [variable]
cls.add_instance_attribute('destinationPort', 'uint16_t', is_const=False)
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::protocol [variable]
cls.add_instance_attribute('protocol', 'uint8_t', is_const=False)
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::sourceAddress [variable]
cls.add_instance_attribute('sourceAddress', 'ns3::Ipv4Address', is_const=False)
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::sourcePort [variable]
cls.add_instance_attribute('sourcePort', 'uint16_t', is_const=False)
return
def register_Ns3Ipv4FlowClassifierSortByCount_methods(root_module, cls):
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::SortByCount::SortByCount() [constructor]
cls.add_constructor([])
## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::SortByCount::SortByCount(ns3::Ipv4FlowClassifier::SortByCount const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4FlowClassifier::SortByCount const &', 'arg0')])
## ipv4-flow-classifier.h (module 'flow-monitor'): bool ns3::Ipv4FlowClassifier::SortByCount::operator()(std::pair<ns3::Ipv4Header::DscpType,unsigned int> left, std::pair<ns3::Ipv4Header::DscpType,unsigned int> right) [member operator]
cls.add_method('operator()',
'bool',
[param('std::pair< ns3::Ipv4Header::DscpType, unsigned int >', 'left'), param('std::pair< ns3::Ipv4Header::DscpType, unsigned int >', 'right')],
custom_name=u'__call__')
return
def register_Ns3Ipv4FlowProbe_methods(root_module, cls):
## ipv4-flow-probe.h (module 'flow-monitor'): ns3::Ipv4FlowProbe::Ipv4FlowProbe(ns3::Ptr<ns3::FlowMonitor> monitor, ns3::Ptr<ns3::Ipv4FlowClassifier> classifier, ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::FlowMonitor >', 'monitor'), param('ns3::Ptr< ns3::Ipv4FlowClassifier >', 'classifier'), param('ns3::Ptr< ns3::Node >', 'node')])
## ipv4-flow-probe.h (module 'flow-monitor'): static ns3::TypeId ns3::Ipv4FlowProbe::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-flow-probe.h (module 'flow-monitor'): void ns3::Ipv4FlowProbe::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3Ipv4L3Protocol_methods(root_module, cls):
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::Ipv4L3Protocol() [constructor]
cls.add_constructor([])
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::AddAddress(uint32_t i, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('AddAddress',
'bool',
[param('uint32_t', 'i'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddInterface',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4L3Protocol::CreateRawSocket() [member function]
cls.add_method('CreateRawSocket',
'ns3::Ptr< ns3::Socket >',
[],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
cls.add_method('DeleteRawSocket',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4L3Protocol::GetAddress(uint32_t interfaceIndex, uint32_t addressIndex) const [member function]
cls.add_method('GetAddress',
'ns3::Ipv4InterfaceAddress',
[param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Interface> ns3::Ipv4L3Protocol::GetInterface(uint32_t i) const [member function]
cls.add_method('GetInterface',
'ns3::Ptr< ns3::Ipv4Interface >',
[param('uint32_t', 'i')],
is_const=True)
## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForAddress(ns3::Ipv4Address addr) const [member function]
cls.add_method('GetInterfaceForAddress',
'int32_t',
[param('ns3::Ipv4Address', 'addr')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
cls.add_method('GetInterfaceForDevice',
'int32_t',
[param('ns3::Ptr< ns3::NetDevice const >', 'device')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForPrefix(ns3::Ipv4Address addr, ns3::Ipv4Mask mask) const [member function]
cls.add_method('GetInterfaceForPrefix',
'int32_t',
[param('ns3::Ipv4Address', 'addr'), param('ns3::Ipv4Mask', 'mask')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMetric(uint32_t i) const [member function]
cls.add_method('GetMetric',
'uint16_t',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMtu(uint32_t i) const [member function]
cls.add_method('GetMtu',
'uint16_t',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNAddresses(uint32_t interface) const [member function]
cls.add_method('GetNAddresses',
'uint32_t',
[param('uint32_t', 'interface')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNInterfaces() const [member function]
cls.add_method('GetNInterfaces',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4L3Protocol::GetNetDevice(uint32_t i) [member function]
cls.add_method('GetNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4L3Protocol::GetProtocol(int protocolNumber) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4L3Protocol::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4L3Protocol::GetRoutingProtocol() const [member function]
cls.add_method('GetRoutingProtocol',
'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
[],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4L3Protocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
cls.add_method('IsDestinationAddress',
'bool',
[param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsForwarding(uint32_t i) const [member function]
cls.add_method('IsForwarding',
'bool',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsUnicast(ns3::Ipv4Address ad) const [member function]
cls.add_method('IsUnicast',
'bool',
[param('ns3::Ipv4Address', 'ad')],
is_const=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsUp(uint32_t i) const [member function]
cls.add_method('IsUp',
'bool',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Receive(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> p, uint16_t protocol, ns3::Address const & from, ns3::Address const & to, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('Receive',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::RemoveAddress(uint32_t interfaceIndex, uint32_t addressIndex) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4L3Protocol::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
cls.add_method('SelectSourceAddress',
'ns3::Ipv4Address',
[param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
cls.add_method('Send',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
cls.add_method('SendWithHeader',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDefaultTtl(uint8_t ttl) [member function]
cls.add_method('SetDefaultTtl',
'void',
[param('uint8_t', 'ttl')])
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDown(uint32_t i) [member function]
cls.add_method('SetDown',
'void',
[param('uint32_t', 'i')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetForwarding(uint32_t i, bool val) [member function]
cls.add_method('SetForwarding',
'void',
[param('uint32_t', 'i'), param('bool', 'val')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetMetric(uint32_t i, uint16_t metric) [member function]
cls.add_method('SetMetric',
'void',
[param('uint32_t', 'i'), param('uint16_t', 'metric')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function]
cls.add_method('SetRoutingProtocol',
'void',
[param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetUp(uint32_t i) [member function]
cls.add_method('SetUp',
'void',
[param('uint32_t', 'i')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4L3Protocol::SourceAddressSelection(uint32_t interface, ns3::Ipv4Address dest) [member function]
cls.add_method('SourceAddressSelection',
'ns3::Ipv4Address',
[param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'dest')],
is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::PROT_NUMBER [variable]
cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetIpForward() const [member function]
cls.add_method('GetIpForward',
'bool',
[],
is_const=True, visibility='private', is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetWeakEsModel() const [member function]
cls.add_method('GetWeakEsModel',
'bool',
[],
is_const=True, visibility='private', is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetIpForward(bool forward) [member function]
cls.add_method('SetIpForward',
'void',
[param('bool', 'forward')],
visibility='private', is_virtual=True)
## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetWeakEsModel(bool model) [member function]
cls.add_method('SetWeakEsModel',
'void',
[param('bool', 'model')],
visibility='private', is_virtual=True)
return
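# Illustrative sketch (hand-written): Ipv4L3Protocol is the concrete Ipv4
# implementation aggregated onto each node by the internet stack helper.
# Two common runtime tweaks using methods bound above; the TypeId lookup is
# the same assumption as in _example_list_ipv4_addresses().
def _example_tune_ipv4_l3(node):
    import ns.internet
    ipv4_l3 = node.GetObject(ns.internet.Ipv4L3Protocol.GetTypeId())
    ipv4_l3.SetDefaultTtl(32)        # SetDefaultTtl(uint8_t), bound above
    ipv4_l3.SetForwarding(1, True)   # enable forwarding on interface index 1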
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor]
cls.add_constructor([])
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function]
cls.add_method('GetGroup',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function]
cls.add_method('GetOrigin',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function]
cls.add_method('GetOutputTtlMap',
'std::map< unsigned int, unsigned int >',
[],
is_const=True)
## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function]
cls.add_method('GetParent',
'uint32_t',
[],
is_const=True)
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function]
cls.add_method('SetGroup',
'void',
[param('ns3::Ipv4Address const', 'group')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function]
cls.add_method('SetOrigin',
'void',
[param('ns3::Ipv4Address const', 'origin')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function]
cls.add_method('SetOutputTtl',
'void',
[param('uint32_t', 'oif'), param('uint32_t', 'ttl')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function]
cls.add_method('SetParent',
'void',
[param('uint32_t', 'iif')])
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable]
cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable]
cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
return
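# Illustrative sketch (hand-written): assembling an ns3::Ipv4MulticastRoute
# with the setters bound above, as a multicast routing protocol would. The
# interface indices and addresses are placeholders.
def _example_build_multicast_route():
    import ns.internet
    import ns.network
    mroute = ns.internet.Ipv4MulticastRoute()
    mroute.SetGroup(ns.network.Ipv4Address("225.1.2.4"))
    mroute.SetOrigin(ns.network.Ipv4Address("10.1.1.1"))
    mroute.SetParent(1)         # expected input interface
    mroute.SetOutputTtl(2, 1)   # forward on interface 2 with TTL threshold 1
    return mroute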
def register_Ns3Ipv4Route_methods(root_module, cls):
cls.add_output_stream_operator()
## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor]
cls.add_constructor([])
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function]
cls.add_method('GetDestination',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function]
cls.add_method('GetGateway',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function]
cls.add_method('GetOutputDevice',
'ns3::Ptr< ns3::NetDevice >',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function]
cls.add_method('GetSource',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function]
cls.add_method('SetDestination',
'void',
[param('ns3::Ipv4Address', 'dest')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function]
cls.add_method('SetGateway',
'void',
[param('ns3::Ipv4Address', 'gw')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function]
cls.add_method('SetOutputDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function]
cls.add_method('SetSource',
'void',
[param('ns3::Ipv4Address', 'src')])
return
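# Illustrative sketch (hand-written): populating an ns3::Ipv4Route the way a
# routing protocol's RouteOutput() typically does. 'device' is assumed to be
# the ns3::NetDevice to send through; the addresses are placeholders.
def _example_build_unicast_route(device):
    import ns.internet
    import ns.network
    route = ns.internet.Ipv4Route()
    route.SetDestination(ns.network.Ipv4Address("10.1.1.2"))
    route.SetSource(ns.network.Ipv4Address("10.1.1.1"))
    route.SetGateway(ns.network.Ipv4Address("10.1.1.254"))
    route.SetOutputDevice(device)
    return route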
def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor]
cls.add_constructor([])
## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
cls.add_method('RouteOutput',
'ns3::Ptr< ns3::Ipv4Route >',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Ipv6_methods(root_module, cls):
## ipv6.h (module 'internet'): ns3::Ipv6::Ipv6(ns3::Ipv6 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6 const &', 'arg0')])
## ipv6.h (module 'internet'): ns3::Ipv6::Ipv6() [constructor]
cls.add_constructor([])
## ipv6.h (module 'internet'): bool ns3::Ipv6::AddAddress(uint32_t interface, ns3::Ipv6InterfaceAddress address) [member function]
cls.add_method('AddAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv6InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): uint32_t ns3::Ipv6::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddInterface',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ipv6InterfaceAddress ns3::Ipv6::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function]
cls.add_method('GetAddress',
'ns3::Ipv6InterfaceAddress',
[param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): int32_t ns3::Ipv6::GetInterfaceForAddress(ns3::Ipv6Address address) const [member function]
cls.add_method('GetInterfaceForAddress',
'int32_t',
[param('ns3::Ipv6Address', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): int32_t ns3::Ipv6::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
cls.add_method('GetInterfaceForDevice',
'int32_t',
[param('ns3::Ptr< ns3::NetDevice const >', 'device')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): int32_t ns3::Ipv6::GetInterfaceForPrefix(ns3::Ipv6Address address, ns3::Ipv6Prefix mask) const [member function]
cls.add_method('GetInterfaceForPrefix',
'int32_t',
[param('ns3::Ipv6Address', 'address'), param('ns3::Ipv6Prefix', 'mask')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): uint16_t ns3::Ipv6::GetMetric(uint32_t interface) const [member function]
cls.add_method('GetMetric',
'uint16_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): uint16_t ns3::Ipv6::GetMtu(uint32_t interface) const [member function]
cls.add_method('GetMtu',
'uint16_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): uint32_t ns3::Ipv6::GetNAddresses(uint32_t interface) const [member function]
cls.add_method('GetNAddresses',
'uint32_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): uint32_t ns3::Ipv6::GetNInterfaces() const [member function]
cls.add_method('GetNInterfaces',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv6::GetNetDevice(uint32_t interface) [member function]
cls.add_method('GetNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv6::GetProtocol(int protocolNumber) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv6::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ptr<ns3::Ipv6RoutingProtocol> ns3::Ipv6::GetRoutingProtocol() const [member function]
cls.add_method('GetRoutingProtocol',
'ns3::Ptr< ns3::Ipv6RoutingProtocol >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): static ns3::TypeId ns3::Ipv6::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): bool ns3::Ipv6::IsForwarding(uint32_t interface) const [member function]
cls.add_method('IsForwarding',
'bool',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): bool ns3::Ipv6::IsUp(uint32_t interface) const [member function]
cls.add_method('IsUp',
'bool',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::RegisterExtensions() [member function]
cls.add_method('RegisterExtensions',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::RegisterOptions() [member function]
cls.add_method('RegisterOptions',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): bool ns3::Ipv6::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): bool ns3::Ipv6::RemoveAddress(uint32_t interface, ns3::Ipv6Address address) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv6Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv6Address source, ns3::Ipv6Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv6Route> route) [member function]
cls.add_method('Send',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv6Address', 'source'), param('ns3::Ipv6Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv6Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetDown(uint32_t interface) [member function]
cls.add_method('SetDown',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetForwarding(uint32_t interface, bool val) [member function]
cls.add_method('SetForwarding',
'void',
[param('uint32_t', 'interface'), param('bool', 'val')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetMetric(uint32_t interface, uint16_t metric) [member function]
cls.add_method('SetMetric',
'void',
[param('uint32_t', 'interface'), param('uint16_t', 'metric')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetPmtu(ns3::Ipv6Address dst, uint32_t pmtu) [member function]
cls.add_method('SetPmtu',
'void',
[param('ns3::Ipv6Address', 'dst'), param('uint32_t', 'pmtu')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetRoutingProtocol(ns3::Ptr<ns3::Ipv6RoutingProtocol> routingProtocol) [member function]
cls.add_method('SetRoutingProtocol',
'void',
[param('ns3::Ptr< ns3::Ipv6RoutingProtocol >', 'routingProtocol')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetUp(uint32_t interface) [member function]
cls.add_method('SetUp',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6::SourceAddressSelection(uint32_t interface, ns3::Ipv6Address dest) [member function]
cls.add_method('SourceAddressSelection',
'ns3::Ipv6Address',
[param('uint32_t', 'interface'), param('ns3::Ipv6Address', 'dest')],
is_pure_virtual=True, is_virtual=True)
## ipv6.h (module 'internet'): ns3::Ipv6::IF_ANY [variable]
cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
## ipv6.h (module 'internet'): bool ns3::Ipv6::GetIpForward() const [member function]
cls.add_method('GetIpForward',
'bool',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## ipv6.h (module 'internet'): bool ns3::Ipv6::GetMtuDiscover() const [member function]
cls.add_method('GetMtuDiscover',
'bool',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetIpForward(bool forward) [member function]
cls.add_method('SetIpForward',
'void',
[param('bool', 'forward')],
is_pure_virtual=True, visibility='private', is_virtual=True)
## ipv6.h (module 'internet'): void ns3::Ipv6::SetMtuDiscover(bool mtuDiscover) [member function]
cls.add_method('SetMtuDiscover',
'void',
[param('bool', 'mtuDiscover')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
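# Illustrative sketch (hand-written): the abstract Ipv6 API mirrors Ipv4 and
# adds RegisterExtensions()/RegisterOptions(); same TypeId-lookup assumption
# as the Ipv4 examples above.
def _example_enable_ipv6_forwarding(node):
    import ns.internet
    ipv6 = node.GetObject(ns.internet.Ipv6.GetTypeId())
    for interface in range(ipv6.GetNInterfaces()):
        ipv6.SetForwarding(interface, True)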
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6FlowClassifier_methods(root_module, cls):
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::Ipv6FlowClassifier() [constructor]
cls.add_constructor([])
## ipv6-flow-classifier.h (module 'flow-monitor'): bool ns3::Ipv6FlowClassifier::Classify(ns3::Ipv6Header const & ipHeader, ns3::Ptr<const ns3::Packet> ipPayload, uint32_t * out_flowId, uint32_t * out_packetId) [member function]
cls.add_method('Classify',
'bool',
[param('ns3::Ipv6Header const &', 'ipHeader'), param('ns3::Ptr< ns3::Packet const >', 'ipPayload'), param('uint32_t *', 'out_flowId'), param('uint32_t *', 'out_packetId')])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple ns3::Ipv6FlowClassifier::FindFlow(ns3::FlowId flowId) const [member function]
cls.add_method('FindFlow',
'ns3::Ipv6FlowClassifier::FiveTuple',
[param('ns3::FlowId', 'flowId')],
is_const=True)
## ipv6-flow-classifier.h (module 'flow-monitor'): std::vector<std::pair<ns3::Ipv6Header::DscpType, unsigned int>,std::allocator<std::pair<ns3::Ipv6Header::DscpType, unsigned int> > > ns3::Ipv6FlowClassifier::GetDscpCounts(ns3::FlowId flowId) const [member function]
cls.add_method('GetDscpCounts',
'std::vector< std::pair< ns3::Ipv6Header::DscpType, unsigned int > >',
[param('ns3::FlowId', 'flowId')],
is_const=True)
## ipv6-flow-classifier.h (module 'flow-monitor'): void ns3::Ipv6FlowClassifier::SerializeToXmlStream(std::ostream & os, uint16_t indent) const [member function]
cls.add_method('SerializeToXmlStream',
'void',
[param('std::ostream &', 'os'), param('uint16_t', 'indent')],
is_const=True, is_virtual=True)
return
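## Illustrative sketch (not PyBindGen output): FindFlow maps a FlowId back to
## its IPv6 five tuple. It assumes the id was produced by a prior FlowMonitor
## run (an unknown id aborts the simulation); never called by this module.
def _example_ipv6_flow_classifier(classifier, flow_id):
    # `classifier`: an Ipv6FlowClassifier obtained from a FlowMonitorHelper;
    # `flow_id`: a FlowId reported by the monitor (both assumed to exist).
    t = classifier.FindFlow(flow_id)
    return (t.sourceAddress, t.sourcePort,
            t.destinationAddress, t.destinationPort, t.protocol)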
def register_Ns3Ipv6FlowClassifierFiveTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::FiveTuple() [constructor]
cls.add_constructor([])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::FiveTuple(ns3::Ipv6FlowClassifier::FiveTuple const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6FlowClassifier::FiveTuple const &', 'arg0')])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::destinationAddress [variable]
cls.add_instance_attribute('destinationAddress', 'ns3::Ipv6Address', is_const=False)
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::destinationPort [variable]
cls.add_instance_attribute('destinationPort', 'uint16_t', is_const=False)
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::protocol [variable]
cls.add_instance_attribute('protocol', 'uint8_t', is_const=False)
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::sourceAddress [variable]
cls.add_instance_attribute('sourceAddress', 'ns3::Ipv6Address', is_const=False)
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::FiveTuple::sourcePort [variable]
cls.add_instance_attribute('sourcePort', 'uint16_t', is_const=False)
return
def register_Ns3Ipv6FlowClassifierSortByCount_methods(root_module, cls):
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::SortByCount::SortByCount() [constructor]
cls.add_constructor([])
## ipv6-flow-classifier.h (module 'flow-monitor'): ns3::Ipv6FlowClassifier::SortByCount::SortByCount(ns3::Ipv6FlowClassifier::SortByCount const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6FlowClassifier::SortByCount const &', 'arg0')])
## ipv6-flow-classifier.h (module 'flow-monitor'): bool ns3::Ipv6FlowClassifier::SortByCount::operator()(std::pair<ns3::Ipv6Header::DscpType,unsigned int> left, std::pair<ns3::Ipv6Header::DscpType,unsigned int> right) [member operator]
cls.add_method('operator()',
'bool',
[param('std::pair< ns3::Ipv6Header::DscpType, unsigned int >', 'left'), param('std::pair< ns3::Ipv6Header::DscpType, unsigned int >', 'right')],
custom_name=u'__call__')
return
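## Note (not PyBindGen output): operator() above is bound with
## custom_name='__call__', so a SortByCount instance is callable from Python
## as a less-than predicate over (DscpType, packet count) pairs. How the
## std::pair arguments are exposed is binding-dependent; treat this as a
## hedged sketch, never called by this module.
def _example_sort_by_count(cmp_functor, left, right):
    # `cmp_functor`: an Ipv6FlowClassifier.SortByCount instance;
    # `left`/`right`: (DscpType, count) pairs as returned by GetDscpCounts().
    return cmp_functor(left, right)    # True when `left` orders before `right`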
def register_Ns3Ipv6FlowProbe_methods(root_module, cls):
## ipv6-flow-probe.h (module 'flow-monitor'): ns3::Ipv6FlowProbe::Ipv6FlowProbe(ns3::Ptr<ns3::FlowMonitor> monitor, ns3::Ptr<ns3::Ipv6FlowClassifier> classifier, ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::FlowMonitor >', 'monitor'), param('ns3::Ptr< ns3::Ipv6FlowClassifier >', 'classifier'), param('ns3::Ptr< ns3::Node >', 'node')])
## ipv6-flow-probe.h (module 'flow-monitor'): static ns3::TypeId ns3::Ipv6FlowProbe::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv6-flow-probe.h (module 'flow-monitor'): void ns3::Ipv6FlowProbe::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3Ipv6L3Protocol_methods(root_module, cls):
## ipv6-l3-protocol.h (module 'internet'): ns3::Ipv6L3Protocol::PROT_NUMBER [variable]
cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
## ipv6-l3-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv6L3Protocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ipv6L3Protocol::Ipv6L3Protocol() [constructor]
cls.add_constructor([])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv6L3Protocol::GetProtocol(int protocolNumber) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv6L3Protocol::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv6L3Protocol::CreateRawSocket() [member function]
cls.add_method('CreateRawSocket',
'ns3::Ptr< ns3::Socket >',
[])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
cls.add_method('DeleteRawSocket',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetDefaultTtl(uint8_t ttl) [member function]
cls.add_method('SetDefaultTtl',
'void',
[param('uint8_t', 'ttl')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetDefaultTclass(uint8_t tclass) [member function]
cls.add_method('SetDefaultTclass',
'void',
[param('uint8_t', 'tclass')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::Receive(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> p, uint16_t protocol, ns3::Address const & from, ns3::Address const & to, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('Receive',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv6Address source, ns3::Ipv6Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv6Route> route) [member function]
cls.add_method('Send',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv6Address', 'source'), param('ns3::Ipv6Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv6Route >', 'route')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetRoutingProtocol(ns3::Ptr<ns3::Ipv6RoutingProtocol> routingProtocol) [member function]
cls.add_method('SetRoutingProtocol',
'void',
[param('ns3::Ptr< ns3::Ipv6RoutingProtocol >', 'routingProtocol')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv6RoutingProtocol> ns3::Ipv6L3Protocol::GetRoutingProtocol() const [member function]
cls.add_method('GetRoutingProtocol',
'ns3::Ptr< ns3::Ipv6RoutingProtocol >',
[],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv6L3Protocol::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddInterface',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv6Interface> ns3::Ipv6L3Protocol::GetInterface(uint32_t i) const [member function]
cls.add_method('GetInterface',
'ns3::Ptr< ns3::Ipv6Interface >',
[param('uint32_t', 'i')],
is_const=True)
## ipv6-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv6L3Protocol::GetNInterfaces() const [member function]
cls.add_method('GetNInterfaces',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): int32_t ns3::Ipv6L3Protocol::GetInterfaceForAddress(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetInterfaceForAddress',
'int32_t',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): int32_t ns3::Ipv6L3Protocol::GetInterfaceForPrefix(ns3::Ipv6Address addr, ns3::Ipv6Prefix mask) const [member function]
cls.add_method('GetInterfaceForPrefix',
'int32_t',
[param('ns3::Ipv6Address', 'addr'), param('ns3::Ipv6Prefix', 'mask')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): int32_t ns3::Ipv6L3Protocol::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
cls.add_method('GetInterfaceForDevice',
'int32_t',
[param('ns3::Ptr< ns3::NetDevice const >', 'device')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::AddAddress(uint32_t i, ns3::Ipv6InterfaceAddress address) [member function]
cls.add_method('AddAddress',
'bool',
[param('uint32_t', 'i'), param('ns3::Ipv6InterfaceAddress', 'address')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ipv6InterfaceAddress ns3::Ipv6L3Protocol::GetAddress(uint32_t interfaceIndex, uint32_t addressIndex) const [member function]
cls.add_method('GetAddress',
'ns3::Ipv6InterfaceAddress',
[param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv6L3Protocol::GetNAddresses(uint32_t interface) const [member function]
cls.add_method('GetNAddresses',
'uint32_t',
[param('uint32_t', 'interface')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::RemoveAddress(uint32_t interfaceIndex, uint32_t addressIndex) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::RemoveAddress(uint32_t interfaceIndex, ns3::Ipv6Address address) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interfaceIndex'), param('ns3::Ipv6Address', 'address')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetMetric(uint32_t i, uint16_t metric) [member function]
cls.add_method('SetMetric',
'void',
[param('uint32_t', 'i'), param('uint16_t', 'metric')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv6L3Protocol::GetMetric(uint32_t i) const [member function]
cls.add_method('GetMetric',
'uint16_t',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv6L3Protocol::GetMtu(uint32_t i) const [member function]
cls.add_method('GetMtu',
'uint16_t',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetPmtu(ns3::Ipv6Address dst, uint32_t pmtu) [member function]
cls.add_method('SetPmtu',
'void',
[param('ns3::Ipv6Address', 'dst'), param('uint32_t', 'pmtu')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::IsUp(uint32_t i) const [member function]
cls.add_method('IsUp',
'bool',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetUp(uint32_t i) [member function]
cls.add_method('SetUp',
'void',
[param('uint32_t', 'i')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetDown(uint32_t i) [member function]
cls.add_method('SetDown',
'void',
[param('uint32_t', 'i')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::IsForwarding(uint32_t i) const [member function]
cls.add_method('IsForwarding',
'bool',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetForwarding(uint32_t i, bool val) [member function]
cls.add_method('SetForwarding',
'void',
[param('uint32_t', 'i'), param('bool', 'val')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6L3Protocol::SourceAddressSelection(uint32_t interface, ns3::Ipv6Address dest) [member function]
cls.add_method('SourceAddressSelection',
'ns3::Ipv6Address',
[param('uint32_t', 'interface'), param('ns3::Ipv6Address', 'dest')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv6L3Protocol::GetNetDevice(uint32_t i) [member function]
cls.add_method('GetNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Icmpv6L4Protocol> ns3::Ipv6L3Protocol::GetIcmpv6() const [member function]
cls.add_method('GetIcmpv6',
'ns3::Ptr< ns3::Icmpv6L4Protocol >',
[],
is_const=True)
    ## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::AddAutoconfiguredAddress(uint32_t interface, ns3::Ipv6Address network, ns3::Ipv6Prefix mask, uint8_t flags, uint32_t validTime, uint32_t preferredTime, ns3::Ipv6Address defaultRouter=ns3::Ipv6Address::GetZero()) [member function]
    cls.add_method('AddAutoconfiguredAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv6Address', 'network'), param('ns3::Ipv6Prefix', 'mask'), param('uint8_t', 'flags'), param('uint32_t', 'validTime'), param('uint32_t', 'preferredTime'), param('ns3::Ipv6Address', 'defaultRouter', default_value='ns3::Ipv6Address::GetZero()')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::RemoveAutoconfiguredAddress(uint32_t interface, ns3::Ipv6Address network, ns3::Ipv6Prefix mask, ns3::Ipv6Address defaultRouter) [member function]
cls.add_method('RemoveAutoconfiguredAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv6Address', 'network'), param('ns3::Ipv6Prefix', 'mask'), param('ns3::Ipv6Address', 'defaultRouter')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::RegisterExtensions() [member function]
cls.add_method('RegisterExtensions',
'void',
[],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::RegisterOptions() [member function]
cls.add_method('RegisterOptions',
'void',
[],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::ReportDrop(ns3::Ipv6Header ipHeader, ns3::Ptr<ns3::Packet> p, ns3::Ipv6L3Protocol::DropReason dropReason) [member function]
cls.add_method('ReportDrop',
'void',
[param('ns3::Ipv6Header', 'ipHeader'), param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv6L3Protocol::DropReason', 'dropReason')],
is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::AddMulticastAddress(ns3::Ipv6Address address) [member function]
cls.add_method('AddMulticastAddress',
'void',
[param('ns3::Ipv6Address', 'address')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::AddMulticastAddress(ns3::Ipv6Address address, uint32_t interface) [member function]
cls.add_method('AddMulticastAddress',
'void',
[param('ns3::Ipv6Address', 'address'), param('uint32_t', 'interface')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::RemoveMulticastAddress(ns3::Ipv6Address address) [member function]
cls.add_method('RemoveMulticastAddress',
'void',
[param('ns3::Ipv6Address', 'address')])
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::RemoveMulticastAddress(ns3::Ipv6Address address, uint32_t interface) [member function]
cls.add_method('RemoveMulticastAddress',
'void',
[param('ns3::Ipv6Address', 'address'), param('uint32_t', 'interface')])
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::IsRegisteredMulticastAddress(ns3::Ipv6Address address) const [member function]
cls.add_method('IsRegisteredMulticastAddress',
'bool',
[param('ns3::Ipv6Address', 'address')],
is_const=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::IsRegisteredMulticastAddress(ns3::Ipv6Address address, uint32_t interface) const [member function]
cls.add_method('IsRegisteredMulticastAddress',
'bool',
[param('ns3::Ipv6Address', 'address'), param('uint32_t', 'interface')],
is_const=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetIpForward(bool forward) [member function]
cls.add_method('SetIpForward',
'void',
[param('bool', 'forward')],
visibility='private', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::GetIpForward() const [member function]
cls.add_method('GetIpForward',
'bool',
[],
is_const=True, visibility='private', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetMtuDiscover(bool mtuDiscover) [member function]
cls.add_method('SetMtuDiscover',
'void',
[param('bool', 'mtuDiscover')],
visibility='private', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::GetMtuDiscover() const [member function]
cls.add_method('GetMtuDiscover',
'bool',
[],
is_const=True, visibility='private', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): void ns3::Ipv6L3Protocol::SetSendIcmpv6Redirect(bool sendIcmpv6Redirect) [member function]
cls.add_method('SetSendIcmpv6Redirect',
'void',
[param('bool', 'sendIcmpv6Redirect')],
visibility='private', is_virtual=True)
## ipv6-l3-protocol.h (module 'internet'): bool ns3::Ipv6L3Protocol::GetSendIcmpv6Redirect() const [member function]
cls.add_method('GetSendIcmpv6Redirect',
'bool',
[],
is_const=True, visibility='private', is_virtual=True)
return
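## Illustrative sketch (not PyBindGen output): Ipv6L3Protocol is normally
## installed through InternetStackHelper; the minimal manual wiring below only
## exercises the per-interface accessors registered above. Import paths
## `ns.network`/`ns.internet` are assumed; never called by this module.
def _example_ipv6_l3_protocol():
    import ns.network
    import ns.internet
    node = ns.network.Node()
    ipv6 = ns.internet.Ipv6L3Protocol()
    ipv6.SetNode(node)
    # AddInterface() returns the index used by GetMtu/IsUp/SetForwarding/...;
    # nothing has been added yet, so the count is 0.
    return ipv6.GetNInterfaces()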
def register_Ns3Ipv6PmtuCache_methods(root_module, cls):
## ipv6-pmtu-cache.h (module 'internet'): ns3::Ipv6PmtuCache::Ipv6PmtuCache(ns3::Ipv6PmtuCache const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PmtuCache const &', 'arg0')])
## ipv6-pmtu-cache.h (module 'internet'): ns3::Ipv6PmtuCache::Ipv6PmtuCache() [constructor]
cls.add_constructor([])
## ipv6-pmtu-cache.h (module 'internet'): void ns3::Ipv6PmtuCache::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## ipv6-pmtu-cache.h (module 'internet'): uint32_t ns3::Ipv6PmtuCache::GetPmtu(ns3::Ipv6Address dst) [member function]
cls.add_method('GetPmtu',
'uint32_t',
[param('ns3::Ipv6Address', 'dst')])
## ipv6-pmtu-cache.h (module 'internet'): ns3::Time ns3::Ipv6PmtuCache::GetPmtuValidityTime() const [member function]
cls.add_method('GetPmtuValidityTime',
'ns3::Time',
[],
is_const=True)
## ipv6-pmtu-cache.h (module 'internet'): static ns3::TypeId ns3::Ipv6PmtuCache::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv6-pmtu-cache.h (module 'internet'): void ns3::Ipv6PmtuCache::SetPmtu(ns3::Ipv6Address dst, uint32_t pmtu) [member function]
cls.add_method('SetPmtu',
'void',
[param('ns3::Ipv6Address', 'dst'), param('uint32_t', 'pmtu')])
## ipv6-pmtu-cache.h (module 'internet'): bool ns3::Ipv6PmtuCache::SetPmtuValidityTime(ns3::Time validity) [member function]
cls.add_method('SetPmtuValidityTime',
'bool',
[param('ns3::Time', 'validity')])
return
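## Illustrative sketch (not PyBindGen output): the PMTU cache stores one path
## MTU per destination and expires entries after the configured validity time.
## Import paths assumed; never called by this module.
def _example_ipv6_pmtu_cache():
    import ns.network
    import ns.internet
    cache = ns.internet.Ipv6PmtuCache()
    dst = ns.network.Ipv6Address('2001:db8::1')
    cache.SetPmtu(dst, 1280)      # record a discovered path MTU
    return cache.GetPmtu(dst)     # -> 1280 while the entry is still valid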
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
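## Illustrative sketch (not PyBindGen output): a NixVector is a bit string of
## per-hop neighbor indices; BitCount() gives the bits needed to address a
## node's neighbors, and indices are consumed in the order they were added.
## Import path assumed; never called by this module.
def _example_nix_vector():
    import ns.network
    nix = ns.network.NixVector()
    bits = nix.BitCount(5)                   # bits needed for 5 neighbors (3)
    nix.AddNeighborIndex(2, bits)            # hop 1: take neighbor 2
    return nix.ExtractNeighborIndex(bits)    # -> 2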
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
cls.add_method('GetLocalTime',
'ns3::Time',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
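## Illustrative sketch (not PyBindGen output): AddDevice/AddApplication return
## the index under which the object can be retrieved again. SimpleNetDevice is
## an assumed concrete NetDevice from the same network module (NetDevice
## itself is abstract); never called by this module.
def _example_node():
    import ns.network
    node = ns.network.Node()
    index = node.AddDevice(ns.network.SimpleNetDevice())
    return index, node.GetNDevices()    # -> (0, 1)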
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
cls.add_constructor([param('std::ostream *', 'os')])
## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
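## Illustrative sketch (not PyBindGen output): Packet buffers are shared
## copy-on-write, so CreateFragment() is cheap and later edits to the parent
## do not affect the fragment. Import path assumed; never called by this
## module.
def _example_packet():
    import ns.network
    pkt = ns.network.Packet(128)            # 128 zero-filled payload bytes
    frag = pkt.CreateFragment(0, 64)        # first 64 bytes, shared buffer
    pkt.RemoveAtStart(16)
    return pkt.GetSize(), frag.GetSize()    # -> (112, 64)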
def register_Ns3QueueItem_methods(root_module, cls):
cls.add_output_stream_operator()
## queue-item.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')])
## queue-item.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function]
cls.add_method('GetPacket',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## queue-item.h (module 'network'): uint32_t ns3::QueueItem::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## queue-item.h (module 'network'): bool ns3::QueueItem::GetUint8Value(ns3::QueueItem::Uint8Values field, uint8_t & value) const [member function]
cls.add_method('GetUint8Value',
'bool',
[param('ns3::QueueItem::Uint8Values', 'field'), param('uint8_t &', 'value')],
is_const=True, is_virtual=True)
## queue-item.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
return
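## Illustrative sketch (not PyBindGen output): a QueueItem simply wraps the
## packet handed to a queue, and GetSize() reports the wrapped packet's size.
## Import path assumed; never called by this module.
def _example_queue_item():
    import ns.network
    item = ns.network.QueueItem(ns.network.Packet(100))
    return item.GetSize()    # -> 100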
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
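## Illustrative sketch (not PyBindGen output): TimeValue is the AttributeValue
## wrapper used when setting Time-typed attributes; Seconds() is the usual
## ns.core helper for constructing a Time. Import path assumed; never called
## by this module.
def _example_time_value():
    import ns.core
    value = ns.core.TimeValue(ns.core.Seconds(1.5))
    return value.Get()    # -> the wrapped ns3::Time (1.5 s)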
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3QueueDiscItem_methods(root_module, cls):
## queue-item.h (module 'network'): ns3::QueueDiscItem::QueueDiscItem(ns3::Ptr<ns3::Packet> p, ns3::Address const & addr, uint16_t protocol) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Address const &', 'addr'), param('uint16_t', 'protocol')])
## queue-item.h (module 'network'): ns3::Address ns3::QueueDiscItem::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True)
## queue-item.h (module 'network'): uint16_t ns3::QueueDiscItem::GetProtocol() const [member function]
cls.add_method('GetProtocol',
'uint16_t',
[],
is_const=True)
## queue-item.h (module 'network'): uint8_t ns3::QueueDiscItem::GetTxQueueIndex() const [member function]
cls.add_method('GetTxQueueIndex',
'uint8_t',
[],
is_const=True)
## queue-item.h (module 'network'): void ns3::QueueDiscItem::SetTxQueueIndex(uint8_t txq) [member function]
cls.add_method('SetTxQueueIndex',
'void',
[param('uint8_t', 'txq')])
## queue-item.h (module 'network'): void ns3::QueueDiscItem::AddHeader() [member function]
cls.add_method('AddHeader',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## queue-item.h (module 'network'): void ns3::QueueDiscItem::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## queue-item.h (module 'network'): bool ns3::QueueDiscItem::Mark() [member function]
cls.add_method('Mark',
'bool',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 | 6,589,814,336,349,080,000 | 63.97481 | 934 | 0.60809 | false |
Coaxis-ASP/opt | daemon/api/tests/test_scanner.py | 2 | 4136 | import unittest
from scanner import Scanner
from tests.stub_network_tools import NetworkToolsStub
class ScannerTestCase(unittest.TestCase):
def test_scan_detect_devices_on_optbox_network(self):
scanner = Scanner(network_tools=NetworkToolsStub(), hostname='127.0.0.1')
scan = scanner.scan(port=9100)
self.assertIsInstance(scan['devices'], dict)
def test_scan_detect_open_port_on_optbox_network(self):
hostname = '10.0.1.250'
port = '9100'
scanner = Scanner(network_tools=NetworkToolsStub(), hostname=hostname)
scan = scanner.scan(port=port)
self.assertEqual(scan['devices'][hostname]['open'], True)
def test_can_get_device_infos_via_snmp(self):
scanner = Scanner(network_tools=NetworkToolsStub(), hostname='10.0.1.231')
details = scanner.get_device_infos(hostname='10.0.1.250')
self.assertDictEqual(details, {
'description': {'oid': '.1.3.6.1.2.1.25.3.2.1.3.1', 'value': 'Brother HL-5250DN series'},
'pageCount': {'oid': '.1.3.6.1.2.1.43.10.2.1.4.1.1', 'value': 22625},
'sysContact': {'oid': '.1.3.6.1.2.1.1.4.0', 'value': ''},
'sysDescription': {'oid': '.1.3.6.1.2.1.1.1.0',
'value': 'Brother NC-6400h, Firmware Ver.1.01 (05.08.31),MID 84UZ92'},
'sysName': {'oid': '.1.3.6.1.2.1.1.5.0', 'value': 'BRN_7D3B43'},
'uptime': {'oid': '.1.3.6.1.2.1.1.3.0', 'value': 143431460}
})
def test_get_netmask_from_optbox(self):
scanner = Scanner(network_tools=NetworkToolsStub(), hostname='10.0.1.133')
netmask = scanner.get_netmask()
self.assertEqual(netmask, '/24')
def test_parse_netmask(self):
scanner = Scanner(network_tools=NetworkToolsStub(), hostname='127.0.0.1')
stdout = [
"1: lo inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever",
"3: wlp4s0 inet 10.0.1.133/24 brd 10.0.1.255 scope global dynamic wlp4s0\ valid_lft 58984sec preferred_lft 58984sec",
"4: docker0 inet 172.17.0.1/16 scope global docker0\ valid_lft forever preferred_lft forever",
"5: br-a49026d1a341 inet 172.18.0.1/16 scope global br-a49026d1a341\ valid_lft forever preferred_lft forever",
"6: br-d26f2005f732 inet 172.19.0.1/16 scope global br-d26f2005f732\ valid_lft forever preferred_lft forever",
]
netmask = scanner.parse_address(stdout)
self.assertEqual(netmask, '/8')
def test_clean_nmap_data_to_keep_only_useful(self):
scanner = Scanner(network_tools=NetworkToolsStub(), hostname='10.0.1.231')
nmap = scanner.network_tools.nmap('10.0.1.231/24', 9100)
clean = scanner.clean_nmap(nmap)
self.assertDictEqual(clean, {
'raw': 'nmap -oX - -p 9100 -T3 --open 10.0.1.231/24',
'devices': {
'10.0.1.250': {'hostname': '10.0.1.250', 'port': 9100, 'open': True},
'10.0.1.248': {'hostname': '10.0.1.248', 'port': 9100, 'open': True}
}
})
def test_add_snmp_infos(self):
scanner = Scanner(network_tools=NetworkToolsStub(), hostname='10.0.1.231')
nmap = {'devices': {'10.0.1.250': {'hostname': '10.0.1.250', 'port': 9100, 'open': True}}}
results = scanner.add_snmp_infos(nmap)
self.assertDictEqual(results['devices']['10.0.1.250'], {
'hostname': '10.0.1.250',
'port': 9100,
'open': True,
'description': {'oid': '.1.3.6.1.2.1.25.3.2.1.3.1', 'value': 'Brother HL-5250DN series'},
'pageCount': {'oid': '.1.3.6.1.2.1.43.10.2.1.4.1.1', 'value': 22625},
'sysContact': {'oid': '.1.3.6.1.2.1.1.4.0', 'value': ''},
'sysDescription': {'oid': '.1.3.6.1.2.1.1.1.0',
'value': 'Brother NC-6400h, Firmware Ver.1.01 (05.08.31),MID 84UZ92'},
'sysName': {'oid': '.1.3.6.1.2.1.1.5.0', 'value': 'BRN_7D3B43'},
'uptime': {'oid': '.1.3.6.1.2.1.1.3.0', 'value': 143431460}
})
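if __name__ == '__main__':
    # Editorial addition, not in the original file: lets the suite be run
    # directly with `python test_scanner.py` as well as via a test runner.
    unittest.main()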
| gpl-3.0 | 6,971,212,879,150,504,000 | 44.955556 | 138 | 0.566006 | false |
NTesla/pattern | pattern/graph/commonsense.py | 21 | 11504 | #### PATTERN | COMMONSENSE #########################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
from codecs import BOM_UTF8
from urllib import urlopen
from itertools import chain
from __init__ import Graph, Node, Edge, bfs
from __init__ import WEIGHT, CENTRALITY, EIGENVECTOR, BETWEENNESS
import os
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
#### COMMONSENSE SEMANTIC NETWORK ##################################################################
#--- CONCEPT ---------------------------------------------------------------------------------------
class Concept(Node):
def __init__(self, *args, **kwargs):
""" A concept in the sematic network.
"""
Node.__init__(self, *args, **kwargs)
self._properties = None
@property
def halo(self, depth=2):
""" Returns the concept halo: a list with this concept + surrounding concepts.
This is useful to reason more fluidly about the concept,
since the halo will include latent properties linked to nearby concepts.
"""
return self.flatten(depth=depth)
@property
def properties(self):
""" Returns the top properties in the concept halo, sorted by betweenness centrality.
The return value is a list of concept id's instead of Concepts (for performance).
"""
if self._properties is None:
g = self.graph.copy(nodes=self.halo)
p = (n for n in g.nodes if n.id in self.graph.properties)
p = [n.id for n in reversed(sorted(p, key=lambda n: n.centrality))]
self._properties = p
return self._properties
def halo(concept, depth=2):
return concept.flatten(depth=depth)
def properties(concept, depth=2, centrality=BETWEENNESS):
g = concept.graph.copy(nodes=halo(concept, depth))
p = (n for n in g.nodes if n.id in concept.graph.properties)
p = [n.id for n in reversed(sorted(p, key=lambda n: getattr(n, centrality)))]
return p
#--- RELATION --------------------------------------------------------------------------------------
class Relation(Edge):
def __init__(self, *args, **kwargs):
""" A relation between two concepts, with an optional context.
For example, "Felix is-a cat" is in the "media" context, "tiger is-a cat" in "nature".
"""
self.context = kwargs.pop("context", None)
Edge.__init__(self, *args, **kwargs)
#--- HEURISTICS ------------------------------------------------------------------------------------
# Similarity between concepts is measured using a featural approach:
# a comparison of the features/properties that are salient in each concept's halo.
# Commonsense.similarity() takes an optional "heuristic" parameter to tweak this behavior.
# It is a tuple of two functions:
# 1) function(concept) returns a list of salient properties (or other),
# 2) function(concept1, concept2) returns the cost to traverse this edge (0.0-1.0).
COMMONALITY = (
# Similarity heuristic that only traverses relations between properties.
lambda concept: concept.properties,
lambda edge: 1 - int(edge.context == "properties" and \
edge.type != "is-opposite-of"))
#--- COMMONSENSE -----------------------------------------------------------------------------------
class Commonsense(Graph):
def __init__(self, data=os.path.join(MODULE, "commonsense.csv"), **kwargs):
""" A semantic network of commonsense, using different relation types:
- is-a,
- is-part-of,
- is-opposite-of,
- is-property-of,
- is-related-to,
- is-same-as,
- is-effect-of.
"""
Graph.__init__(self, **kwargs)
self._properties = None
# Load data from the given path,
# a CSV-file of (concept1, relation, concept2, context, weight)-items.
if data is not None:
s = open(data).read()
s = s.strip(BOM_UTF8)
s = s.decode("utf-8")
s = ((v.strip("\"") for v in r.split(",")) for r in s.splitlines())
for concept1, relation, concept2, context, weight in s:
self.add_edge(concept1, concept2,
type = relation,
context = context,
weight = min(int(weight)*0.1, 1.0))
@property
def concepts(self):
return self.nodes
@property
def relations(self):
return self.edges
@property
def properties(self):
""" Yields all concepts that are properties (i.e., adjectives).
For example: "cold is-property-of winter" => "cold".
"""
if self._properties is None:
#self._properties = set(e.node1.id for e in self.edges if e.type == "is-property-of")
self._properties = (e for e in self.edges if e.context == "properties")
self._properties = set(chain(*((e.node1.id, e.node2.id) for e in self._properties)))
return self._properties
def add_node(self, id, *args, **kwargs):
""" Returns a Concept (Node subclass).
"""
self._properties = None
kwargs.setdefault("base", Concept)
return Graph.add_node(self, id, *args, **kwargs)
def add_edge(self, id1, id2, *args, **kwargs):
""" Returns a Relation between two concepts (Edge subclass).
"""
self._properties = None
kwargs.setdefault("base", Relation)
return Graph.add_edge(self, id1, id2, *args, **kwargs)
def remove(self, x):
self._properties = None
Graph.remove(self, x)
def similarity(self, concept1, concept2, k=3, heuristic=COMMONALITY):
""" Returns the similarity of the given concepts,
by cross-comparing shortest path distance between k concept properties.
A given concept can also be a flat list of properties, e.g. ["creepy"].
The given heuristic is a tuple of two functions:
1) function(concept) returns a list of salient properties,
2) function(edge) returns the cost for traversing this edge (0.0-1.0).
"""
if isinstance(concept1, basestring):
concept1 = self[concept1]
if isinstance(concept2, basestring):
concept2 = self[concept2]
if isinstance(concept1, Node):
concept1 = heuristic[0](concept1)
if isinstance(concept2, Node):
concept2 = heuristic[0](concept2)
if isinstance(concept1, list):
concept1 = [isinstance(n, Node) and n or self[n] for n in concept1]
if isinstance(concept2, list):
concept2 = [isinstance(n, Node) and n or self[n] for n in concept2]
h = lambda id1, id2: heuristic[1](self.edge(id1, id2))
w = 0.0
for p1 in concept1[:k]:
for p2 in concept2[:k]:
p = self.shortest_path(p1, p2, heuristic=h)
w += 1.0 / (p is None and 1e10 or len(p))
return w / k
def nearest_neighbors(self, concept, concepts=[], k=3):
""" Returns the k most similar concepts from the given list.
"""
return sorted(concepts, key=lambda candidate: self.similarity(concept, candidate, k), reverse=True)
similar = neighbors = nn = nearest_neighbors
def taxonomy(self, concept, depth=3, fringe=2):
""" Returns a list of concepts that are descendants of the given concept, using "is-a" relations.
Creates a subgraph of "is-a" related concepts up to the given depth,
then takes the fringe (i.e., leaves) of the subgraph.
"""
def traversable(node, edge):
# Follow parent-child edges.
return edge.node2 == node and edge.type == "is-a"
if not isinstance(concept, Node):
concept = self[concept]
g = self.copy(nodes=concept.flatten(depth, traversable))
g = g.fringe(depth=fringe)
g = [self[n.id] for n in g if n != concept]
return g
field = semantic_field = taxonomy
#g = Commonsense()
#print(g.nn("party", g.field("animal")))
#print(g.nn("creepy", g.field("animal")))
#### COMMONSENSE DATA ##############################################################################
#--- NODEBOX.NET/PERCEPTION ------------------------------------------------------------------------
def download(path=os.path.join(MODULE, "commonsense.csv"), threshold=50):
""" Downloads commonsense data from http://nodebox.net/perception.
Saves the data as commonsense.csv which can be the input for Commonsense.load().
"""
s = "http://nodebox.net/perception?format=txt&robots=1"
s = urlopen(s).read()
s = s.decode("utf-8")
s = s.replace("\\'", "'")
# Group relations by author.
a = {}
for r in ([v.strip("'") for v in r.split(", ")] for r in s.split("\n")):
if len(r) == 7:
a.setdefault(r[-2], []).append(r)
# Iterate authors sorted by number of contributions.
# 1) Authors with 50+ contributions can define new relations and context.
# 2) Authors with 50- contributions (or robots) can only reinforce existing relations.
a = sorted(a.items(), cmp=lambda v1, v2: len(v2[1]) - len(v1[1]))
r = {}
for author, relations in a:
if author == "" or author.startswith("robots@"):
continue
if len(relations) < threshold:
break
# Sort latest-first (we prefer more recent relation types).
        relations = sorted(relations, cmp=lambda r1, r2: cmp(r2[-1], r1[-1]))
# 1) Define new relations.
for concept1, relation, concept2, context, weight, author, date in relations:
id = (concept1, relation, concept2)
if id not in r:
r[id] = [None, 0]
if r[id][0] is None and context is not None:
r[id][0] = context
for author, relations in a:
# 2) Reinforce existing relations.
for concept1, relation, concept2, context, weight, author, date in relations:
id = (concept1, relation, concept2)
if id in r:
r[id][1] += int(weight)
# Export CSV-file.
s = []
for (concept1, relation, concept2), (context, weight) in r.items():
s.append("\"%s\",\"%s\",\"%s\",\"%s\",%s" % (
concept1, relation, concept2, context, weight))
f = open(path, "w")
f.write(BOM_UTF8)
f.write("\n".join(s).encode("utf-8"))
f.close()
def json():
""" Returns a JSON-string with the data from commonsense.csv.
Each relation is encoded as a [concept1, relation, concept2, context, weight] list.
"""
f = lambda s: s.replace("'", "\\'").encode("utf-8")
s = []
g = Commonsense()
for e in g.edges:
s.append("\n\t['%s', '%s', '%s', '%s', %.2f]" % (
f(e.node1.id),
f(e.type),
f(e.node2.id),
f(e.context),
e.weight
))
return "commonsense = [%s];" % ", ".join(s)
#download("commonsense.csv", threshold=50)
#open("commonsense.js", "w").write(json())
| bsd-3-clause | 1,701,638,733,746,475,300 | 39.939502 | 107 | 0.549896 | false |
GNOME/conduit | conduit/Logging.py | 2 | 2552 | import os
import logging
import conduit
#The terminal has 8 colors with codes from 0 to 7
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
#These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
#The background is set with 40 plus the number of the color,
#and the foreground with 30
COLORS = {
'WARNING': COLOR_SEQ % (30 + YELLOW) + 'WARNING' + RESET_SEQ,
'INFO': COLOR_SEQ % (30 + WHITE) + 'INFO' + RESET_SEQ,
'DEBUG': COLOR_SEQ % (30 + BLUE) + 'DEBUG' + RESET_SEQ,
'CRITICAL': COLOR_SEQ % (30 + YELLOW) + 'CRITICAL' + RESET_SEQ,
'ERROR': COLOR_SEQ % (30 + RED) + 'ERROR' + RESET_SEQ,
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
if self.use_color:
record.levelname = COLORS.get(record.levelname, record.levelname)
return logging.Formatter.format(self, record)
# Custom logger class with multiple destinations
class ConduitLogger(logging.Logger):
COLOR_FORMAT = "["+BOLD_SEQ+"%(name)-20s"+RESET_SEQ+"][%(levelname)-18s] %(message)s ("+BOLD_SEQ+"%(filename)s"+RESET_SEQ+":%(lineno)d)"
NO_COLOR_FORMAT = "[%(name)-20s][%(levelname)-18s] %(message)s (%(filename)s:%(lineno)d)"
LOG_FILE_HANDLER = None
def __init__(self, name):
logging.Logger.__init__(self, name)
#Add two handlers, a stderr one, and a file one
color_formatter = ColoredFormatter(ConduitLogger.COLOR_FORMAT)
no_color_formatter = ColoredFormatter(ConduitLogger.NO_COLOR_FORMAT, False)
#create the single file appending handler
        if ConduitLogger.LOG_FILE_HANDLER is None:
filename = os.environ.get('CONDUIT_LOGFILE',os.path.join(conduit.USER_DIR,'conduit.log'))
ConduitLogger.LOG_FILE_HANDLER = logging.FileHandler(filename,'w')
ConduitLogger.LOG_FILE_HANDLER.setFormatter(no_color_formatter)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(ConduitLogger.LOG_FILE_HANDLER)
self.addHandler(console)
return
def enable_debugging():
logging.getLogger().setLevel(logging.DEBUG)
def disable_debugging():
logging.getLogger().setLevel(logging.INFO)
def disable_logging():
logging.getLogger().setLevel(logging.CRITICAL+1)
logging.setLoggerClass(ConduitLogger)
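# Illustrative usage (editorial addition, not in the original module): once
# the logger class is installed, any module-level logger gets colored console
# output plus the shared conduit.log file handler transparently.
# import logging
# log = logging.getLogger("conduit.example")
# log.info("colored on the console, plain in conduit.log")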
| gpl-2.0 | 8,459,186,740,951,520,000 | 37.089552 | 141 | 0.655564 | false |
tszym/ansible | lib/ansible/modules/system/java_cert.py | 9 | 9582 | #!/usr/bin/python
#
# (c) 2013, RSD Services S.A
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: java_cert
version_added: '2.3'
short_description: Uses keytool to import/remove key from java keystore(cacerts)
description:
  - This is a wrapper module around keytool, which can be used to import and
    remove certificates from a given java keystore.
options:
cert_url:
description:
- Basic URL to fetch SSL certificate from. One of cert_url or cert_path is required to load certificate.
cert_port:
description:
- Port to connect to URL. This will be used to create server URL:PORT
default: 443
cert_path:
description:
- Local path to load certificate from. One of cert_url or cert_path is required to load certificate.
cert_alias:
description:
- Imported certificate alias.
keystore_path:
description:
- Path to keystore.
keystore_pass:
description:
- Keystore password.
required: true
keystore_create:
description:
- Create keystore if it doesn't exist
executable:
description:
- Path to keytool binary if not used we search in PATH for it.
default: keytool
state:
description:
- Defines action which can be either certificate import or removal.
choices: [ 'present', 'absent' ]
default: present
author: Adam Hamsik @haad
'''
EXAMPLES = '''
# Import SSL certificate from google.com to a given cacerts keystore
java_cert:
cert_url: google.com
cert_port: 443
keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
keystore_pass: changeit
state: present
# Remove certificate with given alias from a keystore
java_cert:
cert_url: google.com
keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
keystore_pass: changeit
executable: /usr/lib/jvm/jre7/bin/keytool
state: absent
# Import SSL certificate from google.com to a keystore,
# create it if it doesn't exist
java_cert:
cert_url: google.com
keystore_path: /tmp/cacerts
keystore_pass: changeit
keystore_create: yes
state: present
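# Import a certificate from a local file with an explicit alias
# (illustrative paths; cert_path requires cert_alias)
- java_cert:
    cert_path: /opt/certs/example.crt
    cert_alias: example
    keystore_path: /tmp/cacerts
    keystore_pass: changeit
    state: present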
'''
RETURN = '''
msg:
description: Output from stdout of keytool command after execution of given command.
returned: success
type: string
sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
rc:
description: Keytool command execution return value
returned: success
type: int
sample: "0"
cmd:
description: Executed command to get action done
returned: success
type: string
sample: "keytool -importcert -noprompt -keystore"
'''
import os
# import module snippets
from ansible.module_utils.basic import AnsibleModule
def check_cert_present(module, executable, keystore_path, keystore_pass, alias):
''' Check if certificate with alias is present in keystore
located at keystore_path '''
test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' "
"-alias '%s'")%(executable, keystore_path, keystore_pass, alias)
(check_rc, _, _) = module.run_command(test_cmd)
if check_rc == 0:
return True
return False
def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias):
''' Import certificate from URL into keystore located at keystore_path '''
fetch_cmd = ("%s -printcert -rfc -sslserver %s:%d")%(executable, url, port)
import_cmd = ("%s -importcert -noprompt -keystore '%s' "
"-storepass '%s' -alias '%s'")%(executable, keystore_path,
keystore_pass, alias)
if module.check_mode:
module.exit_json(changed=True)
# Fetch SSL certificate from remote host.
(_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True)
# Use remote certificate from remote host and import it to a java keystore
(import_rc, import_out, import_err) = module.run_command(import_cmd,
data=fetch_out,
check_rc=False)
diff = {'before': '\n', 'after': '%s\n'%alias}
if import_rc == 0:
return module.exit_json(changed=True, msg=import_out,
rc=import_rc, cmd=import_cmd, stdout=import_out,
diff=diff)
else:
return module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd,
error=import_err)
def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias):
''' Import certificate from path into keystore located on
keystore_path as alias '''
import_cmd = ("%s -importcert -noprompt -keystore '%s' "
"-storepass '%s' -file '%s' -alias '%s'")%(executable,
keystore_path,
keystore_pass,
path, alias)
if module.check_mode:
module.exit_json(changed=True)
# Use local certificate from local path and import it to a java keystore
(import_rc, import_out, import_err) = module.run_command(import_cmd,
check_rc=False)
diff = {'before': '\n', 'after': '%s\n'%alias}
if import_rc == 0:
return module.exit_json(changed=True, msg=import_out,
rc=import_rc, cmd=import_cmd, stdout=import_out,
error=import_err, diff=diff)
else:
return module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
def delete_cert(module, executable, keystore_path, keystore_pass, alias):
''' Delete certificate identified with alias from keystore on keystore_path '''
del_cmd = ("%s -delete -keystore '%s' -storepass '%s' "
"-alias '%s'")%(executable, keystore_path, keystore_pass, alias)
if module.check_mode:
module.exit_json(changed=True)
# Delete SSL certificate from keystore
(del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True)
diff = {'before': '%s\n'%alias, 'after': None}
return module.exit_json(changed=True, msg=del_out,
rc=del_rc, cmd=del_cmd, stdout=del_out,
error=del_err, diff=diff)
def test_keytool(module, executable):
    ''' Test that the keytool binary is actually executable '''
test_cmd = "%s"%(executable)
module.run_command(test_cmd, check_rc=True)
def test_keystore(module, keystore_path):
    ''' Check that the keystore is accessible as a regular file '''
    if keystore_path is None:
        keystore_path = ''
    if not os.path.isfile(keystore_path):
        ## Keystore doesn't exist (or isn't a regular file) and creation was
        ## not requested, so fail.
        return module.fail_json(changed=False,
                                msg="Module requires existing keystore at keystore_path '%s'"
                                %(keystore_path))
def main():
argument_spec = dict(
cert_url=dict(type='str'),
cert_path=dict(type='str'),
cert_alias=dict(type='str'),
cert_port=dict(default='443', type='int'),
keystore_path=dict(type='str'),
keystore_pass=dict(required=True, type='str', no_log=True),
keystore_create=dict(default=False, type='bool'),
executable=dict(default='keytool', type='str'),
state=dict(default='present',
choices=['present', 'absent'])
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[['cert_path', 'cert_url']],
required_together=[['keystore_path', 'keystore_pass']],
mutually_exclusive=[
['cert_url', 'cert_path']
],
supports_check_mode=True,
)
url = module.params.get('cert_url')
path = module.params.get('cert_path')
port = module.params.get('cert_port')
cert_alias = module.params.get('cert_alias') or url
keystore_path = module.params.get('keystore_path')
keystore_pass = module.params.get('keystore_pass')
keystore_create = module.params.get('keystore_create')
executable = module.params.get('executable')
state = module.params.get('state')
if path and not cert_alias:
module.fail_json(changed=False,
msg="Using local path import from %s requires alias argument."
%(keystore_path))
test_keytool(module, executable)
if not keystore_create:
test_keystore(module, keystore_path)
cert_present = check_cert_present(module, executable, keystore_path,
keystore_pass, cert_alias)
if state == 'absent':
if cert_present:
delete_cert(module, executable, keystore_path, keystore_pass, cert_alias)
elif state == 'present':
if not cert_present:
if path:
import_cert_path(module, executable, path, keystore_path,
keystore_pass, cert_alias)
if url:
import_cert_url(module, executable, url, port, keystore_path,
keystore_pass, cert_alias)
module.exit_json(changed=False)
if __name__ == "__main__":
main()
| gpl-3.0 | 612,301,913,122,521,700 | 34.227941 | 110 | 0.604675 | false |
weedge/doubanFmSpeackerPi | test/photo.py | 1 | 5591 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# look this: http://picamera.readthedocs.io/en/release-1.2/recipes1.html
import io
import os
import sys
import time
import struct
import socket
import picamera
from PIL import Image
import cv2
import numpy as np
def photographToFile():
# Explicitly open a new file called my_image.jpg
my_file = open('photo.jpg', 'wb')
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture(my_file)
# Note that at this point the data is in the file cache, but may
# not actually have been written to disk yet
my_file.close()
# Now the file has been closed, other processes should be able to
#read the image successfully
return True
def photographToBytesIO():
    # Write the stream to BytesIO (Python's in-memory stream class)
stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture(stream, format='jpeg')
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
image = Image.open(stream)
img = image.copy()
return img
def photographToCV():
# Create the in-memory stream
stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture(stream, format='jpeg')
# Construct a numpy array from the stream
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
# "Decode" the image from the array, preserving colour
image = cv2.imdecode(data, 1)
# OpenCV returns an array with data in BGR order. If you want RGB instead
# use the following...
image = image[:, :, ::-1]
return image
def photographSeq():
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
for filename in camera.capture_continuous('img{counter:03d}.jpg'):
print('Captured %s' % filename)
time.sleep(300) # wait 5 minutes
def photographToServerSocket():
    # NOTICE:
    # The server script should be run first (on another machine, not the Pi)
    # to ensure there's a listening socket ready to accept a connection from the client script
# Start a socket listening for connections on 0.0.0.0:8000 (0.0.0.0 means
# all interfaces)
server_socket = socket.socket()
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)
# Accept a single connection and make a file-like object out of it
    # @TODO: use select/poll or epoll to handle many concurrent connections (c10k)
connection = server_socket.accept()[0].makefile('rb')
try:
while True:
# Read the length of the image as a 32-bit unsigned int. If the
# length is zero, quit the loop
image_len = struct.unpack('<L', connection.read(4))[0]
if not image_len:
break
# Construct a stream to hold the image data and read the image
# data from the connection
image_stream = io.BytesIO()
image_stream.write(connection.read(image_len))
# Rewind the stream, open it as an image with PIL and do some
# processing on it
image_stream.seek(0)
image = Image.open(image_stream)
print('Image is %dx%d' % image.size)
image.verify()
print('Image is verified')
finally:
connection.close()
server_socket.close()
def photographToClientSocket():
# Connect a client socket to my_server:8000 (change my_server to the
# hostname of your server)
client_socket = socket.socket()
#client_socket.connect(('my_server', 8000))
client_socket.connect(('192.168.1.102', 8000))
# Make a file-like object out of the connection
connection = client_socket.makefile('wb')
try:
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
# Start a preview and let the camera warm up for 2 seconds
camera.start_preview()
time.sleep(2)
# Note the start time and construct a stream to hold image data
# temporarily (we could write it directly to connection but in this
# case we want to find out the size of each capture first to keep
# our protocol simple)
start = time.time()
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg'):
# Write the length of the capture to the stream and flush to
# ensure it actually gets sent
connection.write(struct.pack('<L', stream.tell()))
connection.flush()
# Rewind the stream and send the image data over the wire
stream.seek(0)
connection.write(stream.read())
# If we've been capturing for more than 30 seconds, quit
if time.time() - start > 30:
break
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
# Write a length of zero to the stream to signal we're done
connection.write(struct.pack('<L', 0))
finally:
connection.close()
client_socket.close()
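# Protocol note (editorial): each frame is sent as a 4-byte little-endian
# length header (struct '<L') followed by the JPEG bytes; a zero-length
# header tells photographToServerSocket() to stop reading.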
if __name__ == '__main__':
photographToFile()
image = photographToBytesIO()
print(image)
time.sleep(3)
image = photographToCV()
print(image)
time.sleep(3)
    photographSeq()  # note: this capture loop never returns; remove it to reach the socket demo below
photographToClientSocket()
| apache-2.0 | 8,029,562,247,687,052,000 | 33.701863 | 96 | 0.612851 | false |
theseyi/WhereHows | wherehows-etl/src/main/resources/jython/LdapTransform.py | 3 | 9172 | #
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from org.slf4j import LoggerFactory
from wherehows.common import Constant
from com.ziclix.python.sql import zxJDBC
import sys
class LdapTransform:
_tables = {"ldap_user": {
"columns": "app_id, is_active, user_id, urn, full_name, display_name, title, employee_number, manager_urn, email, department_id, department_name, start_date, mobile_phone, wh_etl_exec_id",
"file": "ldap_user_record.csv",
"table": "stg_dir_external_user_info",
"nullif_columns":
{"department_id": "''",
"employee_number": 0,
"start_date": "'0000-00-00'",
"manager_urn": "''",
"department_name": "''",
"mobile_phone": "''",
"email": "''",
"title": "''"}
},
"ldap_group": {"columns": "app_id, group_id, sort_id, user_app_id, user_id, wh_etl_exec_id",
"file": "ldap_group_record.csv",
"table": "stg_dir_external_group_user_map",
"nullif_columns": {"user_id": "''"}
},
# "ldap_group_flatten": {"columns": "app_id, group_id, sort_id, user_app_id, user_id, wh_etl_exec_id",
# "file": "ldap_group_flatten_record.csv",
# "table": "stg_dir_external_group_user_map_flatten"
# }
}
_read_file_template = """
LOAD DATA LOCAL INFILE '{folder}/{file}'
INTO TABLE {table}
FIELDS TERMINATED BY '\x1a' ESCAPED BY '\0'
LINES TERMINATED BY '\n'
({columns});
"""
_update_column_to_null_template = """
UPDATE {table} stg
SET {column} = NULL
WHERE {column} = {column_value} and app_id = {app_id}
"""
_update_manager_info = """
update {table} stg
join (select t1.app_id, t1.user_id, t1.employee_number, t2.user_id as manager_user_id, t2.employee_number as manager_employee_number from
{table} t1 join {table} t2 on t1.manager_urn = t2.urn and t1.app_id = t2.app_id
where t1.app_id = {app_id}
) s on stg.app_id = s.app_id and stg.user_id = s.user_id
set stg.manager_user_id = s.manager_user_id
, stg.manager_employee_number = s.manager_employee_number
WHERE stg.app_id = {app_id}
"""
_get_manager_edge = """
select user_id, manager_user_id from {table} stg
where app_id = {app_id} and manager_user_id is not null and user_id <> manager_user_id
"""
_update_hierarchy_info = """
update {table} stg
set org_hierarchy = CASE {org_hierarchy_long_string} END,
org_hierarchy_depth = CASE {org_hierarchy_depth_long_string} END
where app_id = {app_id} and user_id in ({user_ids})
"""
_update_hierarchy_info_per_row = """
update {table} stg
set org_hierarchy = '{org_hierarchy}',
org_hierarchy_depth = {org_hierarchy_depth}
where app_id = {app_id} and user_id = '{user_id}'
"""
_clear_staging_tempalte = """
DELETE FROM {table} where app_id = {app_id}
"""
def __init__(self, args):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
self.wh_con = zxJDBC.connect(args[Constant.WH_DB_URL_KEY],
args[Constant.WH_DB_USERNAME_KEY],
args[Constant.WH_DB_PASSWORD_KEY],
args[Constant.WH_DB_DRIVER_KEY])
self.wh_cursor = self.wh_con.cursor()
self.app_id = int(args[Constant.JOB_REF_ID_KEY])
self.group_app_id = int(args[Constant.LDAP_GROUP_APP_ID_KEY])
self.app_folder = args[Constant.WH_APP_FOLDER_KEY]
self.metadata_folder = self.app_folder + "/" + str(self.app_id)
self.ceo_user_id = args[Constant.LDAP_CEO_USER_ID_KEY]
def run(self):
try:
self.read_file_to_stg()
self.update_null_value()
self.update_manager_info()
#self.update_hierarchy_info()
finally:
self.wh_cursor.close()
self.wh_con.close()
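  # Pipeline sketch (editorial note): run() loads the extracted CSV files
  # into the staging tables, normalizes placeholder values to NULL, then
  # resolves each user's manager by self-joining the staging table on
  # manager_urn.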
def read_file_to_stg(self):
for table in self._tables:
t = self._tables[table]
      # Clear staging table
      query = self._clear_staging_template.format(table=t.get("table"), app_id=self.app_id)
      self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
      # Load file into staging table
query = self._read_file_template.format(folder=self.metadata_folder, file=t.get("file"), table=t.get("table"), columns=t.get("columns"))
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_null_value(self):
for table in self._tables:
t = self._tables[table]
if 'nullif_columns' in t:
for column in t['nullif_columns']:
query = self._update_column_to_null_template.format(table=t.get("table"), column=column, column_value=t['nullif_columns'][column], app_id=self.app_id)
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_manager_info(self):
t = self._tables["ldap_user"]
query = self._update_manager_info.format(table=t.get("table"), app_id=self.app_id)
self.logger.debug(query)
self.wh_cursor.execute(query)
self.wh_con.commit()
def update_hierarchy_info(self):
t = self._tables["ldap_user"]
query = self._get_manager_edge.format(table=t.get("table"), app_id=self.app_id)
self.logger.debug(query)
self.wh_cursor.execute(query)
user_mgr_map = dict()
hierarchy = dict()
for row in self.wh_cursor:
user_mgr_map[row[0]] = row[1]
for user in user_mgr_map:
self.find_path_for_user(user, user_mgr_map, hierarchy)
case_org_hierarchy_template = " WHEN user_id = '{user_id}' THEN '{org_hierarchy}' "
case_org_hierarchy_depth_template = " WHEN user_id = '{user_id}' THEN {org_hierarchy_depth} "
user_ids = []
org_hierarchy_long_string = ""
org_hierarchy_depth_long_string = ""
count = 0
for user in hierarchy:
if hierarchy[user] is not None:
user_ids.append("'" + user + "'")
org_hierarchy_long_string += case_org_hierarchy_template.format(user_id=user, org_hierarchy=hierarchy[user][0])
org_hierarchy_depth_long_string += case_org_hierarchy_depth_template.format(user_id=user, org_hierarchy_depth=hierarchy[user][1])
count += 1
if count % 1000 == 0:
query = self._update_hierarchy_info.format(table=t.get("table"), app_id=self.app_id, user_ids=",".join(user_ids), org_hierarchy_long_string=org_hierarchy_long_string,
org_hierarchy_depth_long_string=org_hierarchy_depth_long_string)
# self.logger.debug(query)
          self.wh_cursor.execute(query)
user_ids = []
org_hierarchy_long_string = ""
org_hierarchy_depth_long_string = ""
query = self._update_hierarchy_info.format(table=t.get("table"), app_id=self.app_id, user_ids=",".join(user_ids), org_hierarchy_long_string=org_hierarchy_long_string,
org_hierarchy_depth_long_string=org_hierarchy_depth_long_string)
# self.logger.debug(query)
    self.wh_cursor.execute(query)
self.wh_con.commit()
def find_path_for_user(self, start, user_mgr_map, hierarchy):
if start in hierarchy:
return hierarchy[start]
if start is None or start == '':
return None
path = "/" + start
depth = 0
user = start
while user in user_mgr_map:
if user == self.ceo_user_id or user == user_mgr_map[user]:
break
user = user_mgr_map[user]
path = "/" + user + path
depth += 1
if user == self.ceo_user_id:
break
if path:
hierarchy[start] = (path, depth)
if len(hierarchy) % 1000 == 0:
self.logger.info("%d hierarchy path created in cache so far. [%s]" % (len(hierarchy), start))
return (path, depth)
if __name__ == "__main__":
  props = sys.argv[1]  # expected to be the job-properties mapping consumed by __init__
lt = LdapTransform(props)
lt.run()
| apache-2.0 | -8,960,257,338,707,552,000 | 40.315315 | 192 | 0.557457 | false |
OpenXT/sync-cli | sync_cli/modify_vm_instance_name.py | 1 | 1885 | #
# Copyright (c) 2013 Citrix Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import arguments
import connect
def add_subparser(subparsers):
description = """Modify the name of a VM instance."""
parser = subparsers.add_parser("modify-vm-instance-name",
help="modify VM instance name",
description=description)
arguments.add_output_args(parser, suppress=True)
parser.add_argument("vm_instance",
metavar="VM_INSTANCE_UUID",
action=arguments.StoreSingleNonEmptyValue,
help="VM instance uuid")
parser.add_argument("name",
metavar="VM_INSTANCE_NAME",
action=arguments.StoreSingleNonEmptyValue,
help="VM instance name")
parser.set_defaults(func=_run)
def _run(args, config):
connection = connect.connect(args, config)
cursor = connection.cursor()
cursor.callproc("sync_admin.modify_vm_instance_name",
keywordParameters={
"vm_instance_uuid": args.vm_instance,
"vm_instance_name": args.name})
| gpl-2.0 | 3,020,705,549,315,304,000 | 36.7 | 75 | 0.636074 | false |
akariv/redash | tests/handlers/test_authentication.py | 4 | 2213 | from tests import BaseTestCase
import mock
import time
from redash.models import User
from redash.authentication.account import invite_token
from tests.handlers import get_request, post_request
class TestInvite(BaseTestCase):
def test_expired_invite_token(self):
with mock.patch('time.time') as patched_time:
patched_time.return_value = time.time() - (7 * 24 * 3600) - 10
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_invite_token(self):
response = get_request('/invite/badtoken', org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_valid_token(self):
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 200)
def test_already_active_user(self):
pass
class TestInvitePost(BaseTestCase):
def test_empty_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': ''}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_bad_token(self):
response = post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_already_active_user(self):
pass
def test_valid_password(self):
token = invite_token(self.factory.user)
password = 'test1234'
response = post_request('/invite/{}'.format(token), data={'password': password}, org=self.factory.org)
self.assertEqual(response.status_code, 302)
self.factory.user = User.get_by_id(self.factory.user.id)
self.assertTrue(self.factory.user.verify_password(password))
| bsd-2-clause | -5,353,357,023,087,710,000 | 37.824561 | 118 | 0.671487 | false |
Mecanon/morphing_wing | dynamic_model/optimization/deap_optimizer.py | 1 | 11813 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 06 17:40:21 2016
@author: Pedro Leal
"""
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
import numpy as np
import os
import sys
import math
from scipy.interpolate import interp1d
import pickle
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
import matlab.engine
#adding path to static model
lib_path = os.path.abspath(os.path.join('..'))
sys.path.append(lib_path)
from static_model import run_multiobjective
#from power_usage import power
#==============================================================================
# Power calculation
#==============================================================================
def power(delta_t, sigma, T, xi, eps_s, L_s, output = "all"):
"""
Calculate work, power and current.
    - output: selects the return value ("power" -> total power only;
      "all" -> current, power and work histories plus total power)
"""
sigma_o = 100e6
r = 0.000381/2.
d = 2*r
T_o = 273.15 + 30
alpha = 0. #set to zero on purpose
c = 837.36 #invented
rho = 6450.
#Transformation strain properties
H_max = 0.0550
H_min = 0.0387
sigma_crit = 0
k = 4.6849e-09
rho_E_M = 0.8e-6 #Dynalloy
rho_E_A = 1.0e-6 #Dynalloy
E_A = 3.7427e+10
E_M = 8.8888e+10
C_A = 7.9498e+06
C_M = 7.1986e+06
M_s = 363.5013
M_f = 297.9735
A_s = 324.6427
A_f = 385.0014
n1 = 0.1752
n2 = 0.1789
n3 = 0.1497
n4 = 0.2935
sigma_cal = 200E6
#==============================================================================
# # Heat Transfer parameters
#==============================================================================
# Gravity:
g = 9.8 #ms-2
# Atmospheric pressure
P_air = 101325. # Pa
    # Molar mass of air
M = 0.0289644 #kg/mol
# Ideal gas constant
R = 8.31447 #J/(mol K)
# Air density:
rho_air = P_air*M / (R*T_o)
# Sutherland's law coefficients
C1 = 1.458e-6 #kg/m.s.sqrt(K)
C2 = 110.4 #K
# Air dynamic viscosity:
mu_air = (C1 * T_o**(3./2)) / (T_o+C2)
# Air kinematic viscosity:
nu_air = mu_air/rho_air
# Air specific heat at constant pressure
CP_list = [1.0038, 1.0049, 1.0063, 1.0082, 1.0106, 1.0135, 1.0206]
T_list = [275., 300., 325., 350., 375., 400., 450.]
Cp_f = interp1d(T_list, CP_list)
# Air conductivity
k_list = [2.428e-5, 2.624e-5, 2.816e-5, 3.003e-5, 3.186e-5, 3.365e-5, 3.710e-5]
k_f = interp1d(T_list, k_list)
# Nusselt number coefficients
alpha_1 = 1.
alpha_2 = 0.287
#==============================================================================
# Calculate Power and current
#==============================================================================
I_list = []
P_list = []
W_list = []
n = len(eps_s)
for i in range(1, n):
delta_sigma = sigma[i] - sigma[i-1]
delta_T = T[i] - T[i-1]
delta_eps = eps_s[i] - eps_s[i-1]
delta_xi = xi[i] - xi[i-1]
T_avg = (T[i] + T[i-1])/2.
Cp_air = Cp_f(T_avg)
k_air = k_f(T_avg)
# Grashof number for external flow around a cylinder
Gr = 2*abs(T[i] - T_o)/(T[i] + T_o)*(g*d**3)/(nu_air**2)
# Prandtl number definition
Pr = mu_air*Cp_air/k_air
# Nusselt number and parameter
Nu = (alpha_1 + alpha_2*(Gr*Pr/(1 + (0.56/Pr)**(9./16))**(16./9))**(1./6))**2
# Calculate convection coefficient h from definition of Nusselt number
h = k_air*Nu/d
rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
if abs(sigma[i]) <= sigma_crit:
dH_cur = 0
else:
dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
a1 = rho_delta_s0*(M_f - M_s)
a2 = rho_delta_s0*(A_s - A_f)
a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
pi_t = Y_0_t + D*abs(sigma[i])*H_cur
#constant h
P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
2.*(h/r)*(T[i] - T_o))
P_list.append(P)
if output == 'all':
I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
2.*h*(T[i] - T_o))))
dW = math.pi*r**2*L_s[0]*0.5*(sigma[i]+sigma[i-1])*delta_eps
I_list.append(I)
W_list.append(dW)
Total_power = 0
for i in range(len(P_list)-1):
Total_power += delta_t*(P_list[i] + P_list[i+1])/2.
if output == 'all':
return I_list, P_list, W_list, Total_power
elif output == "power":
return Total_power
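# Illustrative call (editorial addition; the *_hist arrays are hypothetical
# stand-ins for the histories produced by run_multiobjective):
# I_hist, P_hist, W_hist, total = power(0.05, sigma_hist, T_hist, xi_hist,
#                                       eps_hist, L_hist, output="all")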
#==============================================================================
# Objective function
#==============================================================================
def objfunc(x):
inputs = {'sma':{'x-':x[0], 'y-':x[1], 'x+':x[2], 'y+':x[3]},
'linear':{'x-':x[4], 'y-':x[5], 'x+':x[6], 'y+':x[7]},
'T_f': x[8]}
DataFile = open('opt_data.txt','a')
for x_i in x:
DataFile.write( '\t %.5f' % (x_i) )
DataFile.close()
# print inputs
theta, sigma, T, MVF, eps_s, L_s = run_multiobjective(inputs = inputs, parameters = [eng])
theta = theta[-1]
delta_t = 0.05
P= power(delta_t, sigma, T, MVF, eps_s, L_s, output = "power")
DataFile = open('opt_data.txt','a')
DataFile.write( '\t %.5f \t %.5f' % (theta, P) )
DataFile.write('\n')
DataFile.close()
return theta, P
#==============================================================================
# Start Matlab engine
#==============================================================================
eng = matlab.engine.start_matlab()
#Go to directory where matlab file is
eng.cd('..')
eng.cd('SMA_temperature_strain_driven')
#==============================================================================
# DEAP algorithm
#==============================================================================
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
chord = 1.
x_hinge = 0.75
safety = 0.005*chord
# Problem definition
BOUND_LOW = [x_hinge/2., -.9, x_hinge + safety, 0., x_hinge/2., -.9,
x_hinge + safety, -.9, 273.15+30.]
BOUND_UP = [x_hinge - safety, -0., chord - safety, .9, x_hinge - safety,
0.9, chord - safety, 0., 273.15+140.]
NDIM = 9
def uniform(low, up, size=None):
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", objfunc)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("select", tools.selNSGA2)
def main(seed=None):
random.seed(seed)
# Number of generations
NGEN = 50
# Population size (has to be a multiple of 4)
MU = 40
# Mating probability
CXPB = 0.9
stats = tools.Statistics(lambda ind: ind.fitness.values)
# stats.register("avg", np.mean, axis=0)
# stats.register("std", np.std, axis=0)
stats.register("min", np.min, axis=0)
stats.register("max", np.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
pop = toolbox.population(n=MU)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
# print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))
return pop, logbook
if __name__ == "__main__":
    key_list = ['xs-', 'ys-', 'xs+', 'ys+', 'xl-', 'yl-', 'xl+', 'yl+', 'T_f']
    output_list = ['theta', 'power']
    with open('opt_data.txt', 'w') as DataFile:
        for key in key_list + output_list:
            DataFile.write(key + '\t')
        DataFile.write('\n')
pop, stats = main()
pop.sort(key=lambda x: x.fitness.values)
print(stats)
import matplotlib.pyplot as plt
front = np.array([ind.fitness.values for ind in pop])
pickle.dump( front, open( "front.p", "wb" ) )
pickle.dump( pop, open( "pop.p", "wb" ) )
pickle.dump( stats, open( "stats.p", "wb" ) )
plt.scatter(np.rad2deg(front[:,0]), front[:,1], c="b")
plt.axis("tight")
plt.grid()
plt.xlabel("Deflection angle (${}^{\circ}$)")
plt.ylabel("Heating load (J)")
plt.show() | mit | 5,493,566,711,815,010,000 | 32.5625 | 128 | 0.521205 | false |
rzr/synapse | tests/unittest.py | 4 | 3179 | # -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import unittest
import logging
# logging doesn't have a "don't log anything at all EVARRRR" setting,
# but since the highest value is 50, 1000000 should do ;)
NEVER = 1000000
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(NEVER)
def around(target):
"""A CLOS-style 'around' modifier, which wraps the original method of the
given instance with another piece of code.
@around(self)
def method_name(orig, *args, **kwargs):
return orig(*args, **kwargs)
"""
def _around(code):
name = code.__name__
orig = getattr(target, name)
def new(*args, **kwargs):
return code(orig, *args, **kwargs)
setattr(target, name, new)
return _around
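# A minimal usage sketch (Greeter is a hypothetical class, not part of this
# module): wrap an instance's method while delegating to the original.
#
#     class Greeter(object):
#         def greet(self):
#             return "hello"
#
#     g = Greeter()
#
#     @around(g)
#     def greet(orig):
#         return orig().upper()
#
#     g.greet()  # -> "HELLO"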
class TestCase(unittest.TestCase):
"""A subclass of twisted.trial's TestCase which looks for 'loglevel'
attributes on both itself and its individual test methods, to override the
root logger's logging level while that test (case|method) runs."""
def __init__(self, methodName, *args, **kwargs):
super(TestCase, self).__init__(methodName, *args, **kwargs)
method = getattr(self, methodName)
level = getattr(method, "loglevel",
getattr(self, "loglevel",
NEVER))
@around(self)
def setUp(orig):
old_level = logging.getLogger().level
if old_level != level:
@around(self)
def tearDown(orig):
ret = orig()
logging.getLogger().setLevel(old_level)
return ret
logging.getLogger().setLevel(level)
# Don't set SQL logging
logging.getLogger("synapse.storage").setLevel(old_level)
return orig()
def assertObjectHasAttributes(self, attrs, obj):
"""Asserts that the given object has each of the attributes given, and
that the value of each matches according to assertEquals."""
for (key, value) in attrs.items():
if not hasattr(obj, key):
raise AssertionError("Expected obj to have a '.%s'" % key)
try:
self.assertEquals(attrs[key], getattr(obj, key))
except AssertionError as e:
raise (type(e))(e.message + " for '.%s'" % key)
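    # e.g. self.assertObjectHasAttributes({"foo": 1}, obj) passes only when
    # obj.foo exists and equals 1.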
def DEBUG(target):
"""A decorator to set the .loglevel attribute to logging.DEBUG.
Can apply to either a TestCase or an individual test method."""
target.loglevel = logging.DEBUG
return target
| apache-2.0 | 686,168,607,075,789,600 | 33.554348 | 78 | 0.630387 | false |
wkevina/feature-requests-app | features/migrations/0002_featurerequest.py | 1 | 1267 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 21:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('features', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FeatureRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Title')),
('description', models.TextField(verbose_name='Description')),
('client_priority', models.PositiveIntegerField(verbose_name='Client Priority')),
('target_date', models.DateField(verbose_name='Target Date')),
('ticket_url', models.URLField(blank=True, verbose_name='Ticket URL')),
('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='features.Client', verbose_name='Client')),
('product_area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='features.ProductArea', verbose_name='Product Area')),
],
),
]
| mit | -7,412,387,663,296,785,000 | 42.689655 | 153 | 0.621152 | false |
sebastianwelsh/artifacts | tests/source_type_test.py | 4 | 2142 | # -*- coding: utf-8 -*-
"""Tests for the source type objects."""
import unittest
from artifacts import errors
from artifacts import source_type
class SourceTypeTest(unittest.TestCase):
"""Class to test the artifact source type."""
class ArtifactSourceTypeTest(unittest.TestCase):
"""Class to test the artifacts source type."""
def testInitialize(self):
"""Tests the __init__ function."""
source_type.ArtifactSourceType(names=[u'test'])
class FileSourceTypeTest(unittest.TestCase):
"""Class to test the files source type."""
def testInitialize(self):
"""Tests the __init__ function."""
source_type.FileSourceType(paths=[u'test'])
source_type.FileSourceType(paths=[u'test'], separator=u'\\')
class PathSourceTypeTest(unittest.TestCase):
"""Class to test the paths source type."""
def testInitialize(self):
"""Tests the __init__ function."""
source_type.PathSourceType(paths=[u'test'])
source_type.PathSourceType(paths=[u'test'], separator=u'\\')
class WindowsRegistryKeySourceTypeTest(unittest.TestCase):
"""Class to test the Windows Registry keys source type."""
def testInitialize(self):
"""Tests the __init__ function."""
source_type.WindowsRegistryKeySourceType(keys=[u'test'])
class WindowsRegistryValueSourceTypeTest(unittest.TestCase):
"""Class to test the Windows Registry value source type."""
def testInitialize(self):
"""Tests the __init__ function."""
source_type.WindowsRegistryValueSourceType(
key_value_pairs=[{'key': u'test', 'value': u'test'}])
with self.assertRaises(errors.FormatError):
source_type.WindowsRegistryValueSourceType(
key_value_pairs=[{'bad': u'test', 'value': u'test'}])
with self.assertRaises(errors.FormatError):
source_type.WindowsRegistryValueSourceType(
key_value_pairs={'bad': u'test', 'value': u'test'})
class WMIQuerySourceType(unittest.TestCase):
"""Class to test the WMI query source type."""
def testInitialize(self):
"""Tests the __init__ function."""
source_type.WMIQuerySourceType(query=u'test')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,044,806,728,929,062,000 | 27.945946 | 64 | 0.693277 | false |
yl565/statsmodels | statsmodels/sandbox/tools/cross_val.py | 33 | 11875 | """
Utilities for cross validation.
Taken from scikits.learn.
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# License: BSD Style.
# $Id$
changes to code by josef-pktd:
- docstring formatting: underlines of headers
"""
from statsmodels.compat.python import range, lrange
import numpy as np
from itertools import combinations
################################################################################
class LeaveOneOut(object):
"""
Leave-One-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
"""
def __init__(self, n):
"""
Leave-One-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4]]
>>> y = [1, 2]
>>> loo = cross_val.LeaveOneOut(2)
>>> for train_index, test_index in loo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False True] TEST: [ True False]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [ True False] TEST: [False True]
[[1 2]] [[3 4]] [1] [2]
"""
self.n = n
def __iter__(self):
n = self.n
for i in range(n):
test_index = np.zeros(n, dtype=np.bool)
test_index[i] = True
train_index = np.logical_not(test_index)
yield train_index, test_index
def __repr__(self):
return '%s.%s(n=%i)' % (self.__class__.__module__,
self.__class__.__name__,
self.n,
)
################################################################################
class LeavePOut(object):
"""
Leave-P-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
"""
def __init__(self, n, p):
"""
Leave-P-Out cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
p: int
Size test sets
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [5, 6], [7, 8]]
>>> y = [1, 2, 3, 4]
>>> lpo = cross_val.LeavePOut(4, 2)
>>> for train_index, test_index in lpo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
TRAIN: [False False True True] TEST: [ True True False False]
TRAIN: [False True False True] TEST: [ True False True False]
TRAIN: [False True True False] TEST: [ True False False True]
TRAIN: [ True False False True] TEST: [False True True False]
TRAIN: [ True False True False] TEST: [False True False True]
TRAIN: [ True True False False] TEST: [False False True True]
"""
self.n = n
self.p = p
def __iter__(self):
n = self.n
p = self.p
comb = combinations(lrange(n), p)
for idx in comb:
test_index = np.zeros(n, dtype=np.bool)
test_index[np.array(idx)] = True
train_index = np.logical_not(test_index)
yield train_index, test_index
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
################################################################################
class KFold(object):
"""
K-Folds cross validation iterator:
Provides train/test indexes to split data in train test sets
"""
def __init__(self, n, k):
"""
K-Folds cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
k: int
number of folds
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [1, 2], [3, 4]]
>>> y = [1, 2, 3, 4]
>>> kf = cross_val.KFold(4, k=2)
>>> for train_index, test_index in kf:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
TRAIN: [False False True True] TEST: [ True True False False]
TRAIN: [ True True False False] TEST: [False False True True]
Notes
-----
    All folds have size trunc(n/k); the last one takes the remainder
"""
        if k <= 0:
            raise ValueError('cannot have k below 1')
        if k >= n:
            raise ValueError('cannot have k=%d greater than or equal to %d'
                             % (k, n))
self.n = n
self.k = k
def __iter__(self):
n = self.n
k = self.k
j = int(np.ceil(n/k))
for i in range(k):
test_index = np.zeros(n, dtype=np.bool)
if i<k-1:
test_index[i*j:(i+1)*j] = True
else:
test_index[i*j:] = True
train_index = np.logical_not(test_index)
yield train_index, test_index
def __repr__(self):
return '%s.%s(n=%i, k=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.k,
)
################################################################################
class LeaveOneLabelOut(object):
"""
Leave-One-Label_Out cross-validation iterator:
Provides train/test indexes to split data in train test sets
"""
def __init__(self, labels):
"""
Leave-One-Label_Out cross validation:
Provides train/test indexes to split data in train test sets
Parameters
----------
labels : list
List of labels
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4], [5, 6], [7, 8]]
>>> y = [1, 2, 1, 2]
>>> labels = [1, 1, 2, 2]
>>> lol = cross_val.LeaveOneLabelOut(labels)
>>> for train_index, test_index in lol:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, \
test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False False True True] TEST: [ True True False False]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [ True True False False] TEST: [False False True True]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
self.labels = labels
def __iter__(self):
# We make a copy here to avoid side-effects during iteration
labels = np.array(self.labels, copy=True)
for i in np.unique(labels):
test_index = np.zeros(len(labels), dtype=np.bool)
test_index[labels==i] = True
train_index = np.logical_not(test_index)
yield train_index, test_index
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def split(train_indexes, test_indexes, *args):
"""
For each arg return a train and test subsets defined by indexes provided
in train_indexes and test_indexes
"""
ret = []
for arg in args:
arg = np.asanyarray(arg)
arg_train = arg[train_indexes]
arg_test = arg[test_indexes]
ret.append(arg_train)
ret.append(arg_test)
return ret
'''
>>> cv = cross_val.LeaveOneLabelOut(X, y) # y making y optional and
possible to add other arrays of the same shape[0] too
>>> for X_train, y_train, X_test, y_test in cv:
... print np.sqrt((model.fit(X_train, y_train).predict(X_test)
- y_test) ** 2).mean())
'''
################################################################################
#below: Author: josef-pktd
class KStepAhead(object):
"""
KStepAhead cross validation iterator:
Provides fit/test indexes to split data in sequential sets
"""
def __init__(self, n, k=1, start=None, kall=True, return_slice=True):
"""
KStepAhead cross validation iterator:
Provides train/test indexes to split data in train test sets
Parameters
----------
n: int
Total number of elements
k : int
number of steps ahead
start : int
initial size of data for fitting
kall : boolean
if true. all values for up to k-step ahead are included in the test index.
If false, then only the k-th step ahead value is returnd
Notes
-----
        I don't think this is really useful, because it can be done with
        a very simple loop instead. Useful as a plugin; by default it
        returns slices (return_slice=True) for faster array access.
Examples
--------
>>> from scikits.learn import cross_val
>>> X = [[1, 2], [3, 4]]
>>> y = [1, 2]
>>> loo = cross_val.LeaveOneOut(2)
>>> for train_index, test_index in loo:
... print "TRAIN:", train_index, "TEST:", test_index
... X_train, X_test, y_train, y_test = cross_val.split(train_index, test_index, X, y)
... print X_train, X_test, y_train, y_test
TRAIN: [False True] TEST: [ True False]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [ True False] TEST: [False True]
[[1 2]] [[3 4]] [1] [2]
"""
self.n = n
self.k = k
if start is None:
start = int(np.trunc(n*0.25)) # pick something arbitrary
self.start = start
self.kall = kall
self.return_slice = return_slice
def __iter__(self):
n = self.n
k = self.k
start = self.start
if self.return_slice:
for i in range(start, n-k):
train_slice = slice(None, i, None)
if self.kall:
test_slice = slice(i, i+k)
else:
test_slice = slice(i+k-1, i+k)
yield train_slice, test_slice
else: #for compatibility with other iterators
for i in range(start, n-k):
train_index = np.zeros(n, dtype=np.bool)
train_index[:i] = True
test_index = np.zeros(n, dtype=np.bool)
if self.kall:
test_index[i:i+k] = True # np.logical_not(test_index)
else:
test_index[i+k-1:i+k] = True
#or faster to return np.arange(i,i+k) ?
#returning slice should be faster in this case
yield train_index, test_index
def __repr__(self):
return '%s.%s(n=%i)' % (self.__class__.__module__,
self.__class__.__name__,
self.n,
)
| bsd-3-clause | 2,122,738,916,550,013,400 | 30.922043 | 96 | 0.476884 | false |
artemrizhov/pymorphy | pymorphy/django_utils.py | 1 | 5368 | #coding: utf-8
import re
from django import template
from django.utils.encoding import force_unicode
from pymorphy.django_conf import default_morph, MARKER_OPEN, MARKER_CLOSE
from pymorphy.contrib import tokenizers
register = template.Library()
markup_re = re.compile('(%s.+?%s)' % (MARKER_OPEN, MARKER_CLOSE), re.U)
def inflect(text):
"""
    Analyzes Russian and English morphology and converts phrases to the
    specified forms.
    Processes phrases in the text according to form data embedded in the
    text itself. Useful for internationalized projects, because it keeps
    the description of a word's form next to its context. For example, the
    phrase "покупайте [[рыбу|вн]]" can be painlessly replaced with
    "не уходите без [[рыбы|рд]]" right in the translation file, without
    having to change the program source code to alter the word form. It
    also keeps the source code free of Russian form markers such as дт, мн
    and so on, which would puzzle non-Russian programmers who run into the
    code. ;)
    Usage example in a translation file:
    msgid "Buy the %(product_name)s"
    msgstr "Не уходите без [[%(product_name)s|рд]]"
    In other languages you can simply omit the double-bracket marker, and
    the phrase is left untouched.
    See also the {% blockinflect %} block tag for templates, which makes it
    possible to wrap any text in this function, including the
    trans/blocktrans tags. Again, this moves word-form information into the
    translation file, where it can be edited without touching the
    templates.
"""
if not text:
return text
return _process_marked_inplace_phrase(force_unicode(text),
default_morph.inflect_ru)
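# A minimal behaviour sketch (actual output depends on the morphology
# dictionaries backing default_morph):
#
#     inflect(u'не уходите без [[рыба|рд]]')  # -> u'не уходите без рыбы'
#     inflect(u'plain text, no markers')      # returned unchanged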
def _process_marked_inplace_phrase(text, process_func, *args, **kwargs):
"""
    Process a phrase. Only the fragments enclosed in double square brackets
    are processed. The form information is taken from the text after the
    last separating vertical bar
    (e.g. "[[лошадь|рд]] Пржевальского").
"""
def process(m):
parts = m.group(1)[2:-2].rsplit('|', 1)
if len(parts) == 1:
return m.group(1)
return _process_phrase(parts[0], process_func, parts[1],
*args, **kwargs)
return re.sub(markup_re, process, text)
def _restore_register(morphed_word, word):
""" Восстановить регистр слова """
if '-' in word:
parts = zip(morphed_word.split('-'), word.split('-'))
return '-'.join(_restore_register(*p) for p in parts)
if word.isupper():
return morphed_word.upper()
elif word[0].isupper():
return morphed_word[0].upper() + morphed_word[1:].lower()
else:
return morphed_word.lower()
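# e.g. _restore_register(u'ЛОШАДИ', u'Лошадь') -> u'Лошади': the case
# pattern of the original word is carried over to the inflected form.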
def _process_phrase(phrase, process_func, *args, **kwargs):
""" обработать фразу """
words = tokenizers.extract_tokens(phrase)
result=""
try:
for word in words:
if tokenizers.GROUPING_SPACE_REGEX.match(word):
result += word
continue
processed = process_func(word.upper(), *args, **kwargs)
processed = _restore_register(processed, word) if processed else word
result += processed
except Exception:
return phrase
return result
def _process_marked_phrase(phrase, process_func, *args, **kwargs):
""" Обработать фразу. В фразе обрабатываются только куски, заключенные
в двойные квадратные скобки (например, "[[лошадь]] Пржевальского").
"""
def process(m):
return _process_phrase(m.group(1)[2:-2],
process_func, *args, **kwargs)
return re.sub(markup_re, process, phrase)
def _process_unmarked_phrase(phrase, process_func, *args, **kwargs):
""" Обработать фразу. В фразе не обрабатываются куски, заключенные
в двойные квадратные скобки (например, "лошадь [[Пржевальского]]").
"""
def process(part):
if not re.match(markup_re, part):
return _process_phrase(part, process_func, *args, **kwargs)
return part[2:-2]
parts = [process(s) for s in re.split(markup_re, phrase)]
return "".join(parts)
| mit | -7,947,730,506,973,715,000 | 35.12931 | 81 | 0.651634 | false |
serverdensity/sd-agent-core-plugins | yarn/test_yarn.py | 1 | 8597 | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from urlparse import urljoin
import os
# 3rd party
import mock
import json
from tests.checks.common import AgentCheckTest, Fixtures
# IDs
CLUSTER_NAME = 'SparkCluster'
# Resource manager URI
RM_ADDRESS = 'http://localhost:8088'
# Service URLs
YARN_CLUSTER_METRICS_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/metrics')
YARN_APPS_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/apps') + '?states=RUNNING'
YARN_NODES_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/nodes')
YARN_SCHEDULER_URL = urljoin(RM_ADDRESS, '/ws/v1/cluster/scheduler')
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
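# Flag flipped by requests_get_mock whenever the YARN apps URL is fetched,
# so tests can assert whether application metrics were collected.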
collected_from_app_url = False
def requests_get_mock(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
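    # Route each mocked URL to its canned fixture payload.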
if args[0] == YARN_CLUSTER_METRICS_URL:
with open(Fixtures.file('cluster_metrics', sdk_dir=FIXTURE_DIR), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_APPS_URL:
with open(Fixtures.file('apps_metrics', sdk_dir=FIXTURE_DIR), 'r') as f:
body = f.read()
global collected_from_app_url
collected_from_app_url = True
return MockResponse(body, 200)
elif args[0] == YARN_NODES_URL:
with open(Fixtures.file('nodes_metrics', sdk_dir=FIXTURE_DIR), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif args[0] == YARN_SCHEDULER_URL:
with open(Fixtures.file('scheduler_metrics', sdk_dir=FIXTURE_DIR), 'r') as f:
body = f.read()
return MockResponse(body, 200)
class YARNCheck(AgentCheckTest):
CHECK_NAME = 'yarn'
YARN_CONFIG = {
'resourcemanager_uri': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'tags': [
'opt_key:opt_value'
],
'application_tags': {
'app_id': 'id',
'app_queue': 'queue'
},
'queue_blacklist': [
'nofollowqueue'
]
}
YARN_CONFIG_EXCLUDING_APP = {
'resourcemanager_uri': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'tags': [
'opt_key:opt_value'
],
'application_tags': {
'app_id': 'id',
'app_queue': 'queue'
},
'collect_app_metrics': 'false'
}
YARN_CLUSTER_METRICS_VALUES = {
'yarn.metrics.apps_submitted': 0,
'yarn.metrics.apps_completed': 0,
'yarn.metrics.apps_pending': 0,
'yarn.metrics.apps_running': 0,
'yarn.metrics.apps_failed': 0,
'yarn.metrics.apps_killed': 0,
'yarn.metrics.reserved_mb': 0,
'yarn.metrics.available_mb': 17408,
'yarn.metrics.allocated_mb': 0,
'yarn.metrics.total_mb': 17408,
'yarn.metrics.reserved_virtual_cores': 0,
'yarn.metrics.available_virtual_cores': 7,
'yarn.metrics.allocated_virtual_cores': 1,
'yarn.metrics.total_virtual_cores': 8,
'yarn.metrics.containers_allocated': 0,
'yarn.metrics.containers_reserved': 0,
'yarn.metrics.containers_pending': 0,
'yarn.metrics.total_nodes': 1,
'yarn.metrics.active_nodes': 1,
'yarn.metrics.lost_nodes': 0,
'yarn.metrics.unhealthy_nodes': 0,
'yarn.metrics.decommissioned_nodes': 0,
'yarn.metrics.rebooted_nodes': 0,
}
YARN_CLUSTER_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'opt_key:opt_value'
]
YARN_APP_METRICS_VALUES = {
'yarn.apps.progress': 100,
'yarn.apps.started_time': 1326815573334,
'yarn.apps.finished_time': 1326815598530,
'yarn.apps.elapsed_time': 25196,
'yarn.apps.allocated_mb': 0,
'yarn.apps.allocated_vcores': 0,
'yarn.apps.running_containers': 0,
'yarn.apps.memory_seconds': 151730,
'yarn.apps.vcore_seconds': 103,
}
YARN_APP_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'app_name:word count',
'app_queue:default',
'opt_key:opt_value'
]
YARN_NODE_METRICS_VALUES = {
'yarn.node.last_health_update': 1324056895432,
'yarn.node.used_memory_mb': 0,
'yarn.node.avail_memory_mb': 8192,
'yarn.node.used_virtual_cores': 0,
'yarn.node.available_virtual_cores': 8,
'yarn.node.num_containers': 0,
}
YARN_NODE_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'node_id:h2:1235',
'opt_key:opt_value'
]
YARN_ROOT_QUEUE_METRICS_VALUES = {
'yarn.queue.root.max_capacity': 100,
'yarn.queue.root.used_capacity': 35.012,
'yarn.queue.root.capacity': 100
}
YARN_ROOT_QUEUE_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'queue_name:root',
'opt_key:opt_value'
]
YARN_QUEUE_METRICS_VALUES = {
'yarn.queue.num_pending_applications': 0,
'yarn.queue.user_am_resource_limit.memory': 2587968,
'yarn.queue.user_am_resource_limit.vcores': 688,
'yarn.queue.absolute_capacity': 52.12,
'yarn.queue.user_limit_factor': 1,
'yarn.queue.user_limit': 100,
'yarn.queue.num_applications': 3,
'yarn.queue.used_am_resource.memory': 2688,
'yarn.queue.used_am_resource.vcores': 3,
'yarn.queue.absolute_used_capacity': 31.868685,
'yarn.queue.resources_used.memory': 3164800,
'yarn.queue.resources_used.vcores': 579,
'yarn.queue.am_resource_limit.vcores': 688,
'yarn.queue.am_resource_limit.memory': 2587968,
'yarn.queue.capacity': 52.12,
'yarn.queue.num_active_applications': 3,
'yarn.queue.absolute_max_capacity': 52.12,
'yarn.queue.used_capacity': 61.14484,
'yarn.queue.num_containers': 75,
'yarn.queue.max_capacity': 52.12,
'yarn.queue.max_applications': 5212,
'yarn.queue.max_applications_per_user': 5212
}
YARN_QUEUE_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'queue_name:clientqueue',
'opt_key:opt_value'
]
YARN_QUEUE_NOFOLLOW_METRICS_TAGS = [
'cluster_name:%s' % CLUSTER_NAME,
'queue_name:nofollowqueue',
'opt_key:opt_value'
]
def setUp(self):
global collected_from_app_url
collected_from_app_url = False
@mock.patch('requests.get', side_effect=requests_get_mock)
def test_check_excludes_app_metrics(self, mock_requests):
config = {
'instances': [self.YARN_CONFIG_EXCLUDING_APP]
}
self.run_check(config)
# Check that the YARN App metrics is empty
self.assertFalse(collected_from_app_url)
@mock.patch('requests.get', side_effect=requests_get_mock)
def test_check(self, mock_requests):
config = {
'instances': [self.YARN_CONFIG]
}
self.run_check(config)
# Check the YARN Cluster Metrics
for metric, value in self.YARN_CLUSTER_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_CLUSTER_METRICS_TAGS)
# Check the YARN App Metrics
for metric, value in self.YARN_APP_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_APP_METRICS_TAGS)
# Check the YARN Node Metrics
for metric, value in self.YARN_NODE_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_NODE_METRICS_TAGS)
# Check the YARN Root Queue Metrics
for metric, value in self.YARN_ROOT_QUEUE_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_ROOT_QUEUE_METRICS_TAGS)
# Check the YARN Custom Queue Metrics
for metric, value in self.YARN_QUEUE_METRICS_VALUES.iteritems():
self.assertMetric(metric,
value=value,
tags=self.YARN_QUEUE_METRICS_TAGS)
# Check the YARN Queue Metrics from excluded queues are absent
self.assertMetric('yarn.queue.absolute_capacity', count=0, tags=self.YARN_QUEUE_NOFOLLOW_METRICS_TAGS)
| bsd-3-clause | -8,459,230,094,924,040,000 | 31.198502 | 110 | 0.591951 | false |
mgit-at/ansible | lib/ansible/modules/cloud/amazon/redshift_facts.py | 21 | 10951 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redshift_facts
author: "Jens Carl (@j-carl)"
short_description: Gather facts about Redshift cluster(s)
description:
- Gather facts about Redshift cluster(s)
version_added: "2.4"
requirements: [ boto3 ]
options:
cluster_identifier:
description:
- The prefix of cluster identifier of the Redshift cluster you are searching for.
- "This is a regular expression match with implicit '^'. Append '$' for a complete match."
required: false
aliases: ['name', 'identifier']
tags:
description:
- "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
to match against the security group(s) you are searching for."
required: false
extends_documentation_fragment:
- ec2
- aws
'''
EXAMPLES = '''
# Note: These examples do net set authentication details, see the AWS guide for details.
# Find all clusters
- redshift_facts:
register: redshift
# Find cluster(s) with matching tags
- redshift_facts:
tags:
env: prd
stack: monitoring
register: redshift_tags
# Find cluster(s) with matching name/prefix and tags
- redshift_facts:
tags:
env: dev
stack: web
name: user-
register: redshift_web
# Fail if no cluster(s) is/are found
- redshift_facts:
tags:
env: stg
stack: db
register: redshift_user
failed_when: "{{ redshift_user.results | length == 0 }}"
'''
RETURN = '''
# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
---
cluster_identifier:
description: Unique key to identify the cluster.
returned: success
type: string
sample: "redshift-identifier"
node_type:
description: The node type for nodes in the cluster.
returned: success
type: string
sample: "ds2.xlarge"
cluster_status:
description: Current state of the cluster.
returned: success
type: string
sample: "available"
modify_status:
description: The status of a modify operation.
returned: optional
type: string
sample: ""
master_username:
description: The master user name for the cluster.
returned: success
type: string
sample: "admin"
db_name:
description: The name of the initial database that was created when the cluster was created.
returned: success
type: string
sample: "dev"
endpoint:
description: The connection endpoint.
returned: success
type: string
sample: {
"address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
"port": 5439
}
cluster_create_time:
description: The date and time that the cluster was created.
returned: success
type: string
sample: "2016-05-10T08:33:16.629000+00:00"
automated_snapshot_retention_period:
description: The number of days that automatic cluster snapshots are retained.
returned: success
type: int
sample: 1
cluster_security_groups:
description: A list of cluster security groups that are associated with the cluster.
returned: success
type: list
sample: []
vpc_security_groups:
description: A list of VPC security groups the are associated with the cluster.
returned: success
type: list
sample: [
{
"status": "active",
"vpc_security_group_id": "sg-12cghhg"
}
]
cluster_paramater_groups:
description: The list of cluster parameters that are associated with this cluster.
returned: success
type: list
sample: [
{
"cluster_parameter_status_list": [
{
"parameter_apply_status": "in-sync",
"parameter_name": "statement_timeout"
},
{
"parameter_apply_status": "in-sync",
"parameter_name": "require_ssl"
}
],
"parameter_apply_status": "in-sync",
"parameter_group_name": "tuba"
}
]
cluster_subnet_group_name:
description: The name of the subnet group that is associated with the cluster.
returned: success
type: string
sample: "redshift-subnet"
vpc_id:
description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
returned: success
type: string
sample: "vpc-1234567"
availability_zone:
description: The name of the Availability Zone in which the cluster is located.
returned: success
type: string
sample: "us-east-1b"
preferred_maintenance_window:
description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
returned: success
type: string
sample: "tue:07:30-tue:08:00"
pending_modified_values:
description: A value that, if present, indicates that changes to the cluster are pending.
returned: success
type: dict
sample: {}
cluster_version:
description: The version ID of the Amazon Redshift engine that is running on the cluster.
returned: success
type: string
sample: "1.0"
allow_version_upgrade:
description: >
A Boolean value that, if true, indicates that major version upgrades will be applied
automatically to the cluster during the maintenance window.
returned: success
type: boolean
sample: true|false
number_of_nodes:
description: The number of compute nodes in the cluster.
returned: success
type: int
sample: 12
publicly_accessible:
description: A Boolean value that, if true , indicates that the cluster can be accessed from a public network.
returned: success
type: boolean
sample: true|false
encrypted:
description: Boolean value that, if true , indicates that data in the cluster is encrypted at rest.
returned: success
type: boolean
sample: true|false
restore_status:
description: A value that describes the status of a cluster restore action.
returned: success
type: dict
sample: {}
hsm_status:
description: >
A value that reports whether the Amazon Redshift cluster has finished applying any hardware
security module (HSM) settings changes specified in a modify cluster command.
returned: success
type: dict
sample: {}
cluster_snapshot_copy_status:
description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
returned: success
type: dict
sample: {}
cluster_public_keys:
description: The public key for the cluster.
returned: success
type: string
sample: "ssh-rsa anjigfam Amazon-Redshift\n"
cluster_nodes:
description: The nodes in the cluster.
returned: success
type: list
sample: [
{
"node_role": "LEADER",
"private_ip_address": "10.0.0.1",
"public_ip_address": "x.x.x.x"
},
{
"node_role": "COMPUTE-1",
"private_ip_address": "10.0.0.3",
"public_ip_address": "x.x.x.x"
}
]
elastic_ip_status:
description: The status of the elastic IP (EIP) address.
returned: success
type: dict
sample: {}
cluster_revision_number:
description: The specific revision number of the database in the cluster.
returned: success
type: string
sample: "1231"
tags:
description: The list of tags for the cluster.
returned: success
type: list
sample: []
kms_key_id:
description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
returned: success
type: string
sample: ""
enhanced_vpc_routing:
description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
returned: success
type: boolean
sample: true|false
iam_roles:
description: List of IAM roles attached to the cluster.
returned: success
type: list
sample: []
'''
import re
import traceback
try:
    from botocore.exceptions import ClientError
except ImportError:
pass # will be picked up from imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, get_aws_connection_info
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict
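# match_tags returns True only when every requested key/value pair appears
# in the cluster's Tags list.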
def match_tags(tags_to_match, cluster):
for key, value in tags_to_match.items():
for tag in cluster['Tags']:
if key == tag['Key'] and value == tag['Value']:
return True
return False
def find_clusters(conn, module, identifier=None, tags=None):
try:
cluster_paginator = conn.get_paginator('describe_clusters')
clusters = cluster_paginator.paginate().build_full_result()
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
matched_clusters = []
if identifier is not None:
identifier_prog = re.compile('^' + identifier)
for cluster in clusters['Clusters']:
matched_identifier = True
if identifier:
matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
matched_tags = True
if tags:
matched_tags = match_tags(tags, cluster)
if matched_identifier and matched_tags:
matched_clusters.append(camel_dict_to_snake_dict(cluster))
return matched_clusters
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
tags=dict(type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
cluster_identifier = module.params.get('cluster_identifier')
cluster_tags = module.params.get('tags')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
redshift = boto3_conn(module, conn_type='client', resource='redshift', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
module.exit_json(results=results)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,607,319,947,037,392,000 | 29.33518 | 134 | 0.664779 | false |
shakhat/os-failures | examples/due.py | 1 | 1568 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os_failures
def main():
# cloud config schema is an extension to os-client-config
cloud_config = {
'auth': {
'username': 'admin',
'password': 'admin',
'project_name': 'admin',
},
'region_name': 'RegionOne',
'cloud_management': {
'driver': 'devstack',
'address': 'devstack.local',
'username': 'developer',
}
}
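    # The cloud_management block above tells os_failures how to reach the
    # DevStack host itself (driver, address, username), separate from the
    # regular OpenStack auth settings.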
logging.info('# Create connection')
distractor = os_failures.connect(cloud_config)
logging.info('# Verify connection to the cloud')
distractor.verify()
logging.info('# Get a particular service in the cloud')
service = distractor.get_service(name='keystone-api')
logging.info('Keystone API Service: %s', service)
logging.info('# Restart the service')
service.restart()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO)
main()
| apache-2.0 | 1,203,793,679,706,250,800 | 29.745098 | 75 | 0.638393 | false |
dienerpiske/QSabe | QSabe/settings.py | 1 | 3692 | """
Django settings for QSabe project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
#TEMPLATE_DIRS = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
LOGIN_REDIRECT_URL = '/qsabe/home/'
LOGIN_URL = '/qsabe/'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qa6re6z%i19(mx*(chswc&me1w1gq0$@6ts#h*m7!^1s+p1in%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'sqlite3DB'),
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'QSabeApp',
'django.contrib.admin',
'taggit',
'dajaxice',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'QSabe.urls'
WSGI_APPLICATION = 'QSabe.wsgi.application'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#( 'assets', os.path.join(os.path.dirname(__file__), 'assets').replace('\\', '/') ), # encontrando a pasta assets
#os.path.join(BASE_DIR , '../assets/'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
APPEND_SLASH = False
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'dajaxice.finders.DajaxiceFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
#'C:\Users\Enza\Desktop'
BASE_DIR + '/templates'
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
| gpl-2.0 | -2,893,252,307,346,829,000 | 27.301587 | 116 | 0.691495 | false |
pyfisch/servo | tests/wpt/web-platform-tests/tools/third_party/hpack/test/test_hpack.py | 36 | 28446 | # -*- coding: utf-8 -*-
from hpack.hpack import Encoder, Decoder, _dict_to_iterable, _to_bytes
from hpack.exceptions import (
HPACKDecodingError, InvalidTableIndex, OversizedHeaderListError,
InvalidTableSizeError
)
from hpack.struct import HeaderTuple, NeverIndexedHeaderTuple
import itertools
import pytest
from hypothesis import given
from hypothesis.strategies import text, binary, sets, one_of
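# Python 2/3 compatibility: Python 3 has no `unicode` builtin, so alias it
# to `str` there.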
try:
unicode = unicode
except NameError:
unicode = str
class TestHPACKEncoder(object):
# These tests are stolen entirely from the IETF specification examples.
def test_literal_header_field_with_indexing(self):
"""
The header field representation uses a literal name and a literal
value.
"""
e = Encoder()
header_set = {'custom-key': 'custom-header'}
result = b'\x40\x0acustom-key\x0dcustom-header'
assert e.encode(header_set, huffman=False) == result
assert list(e.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8'))
for n, v in header_set.items()
]
def test_sensitive_headers(self):
"""
        Sensitive headers are encoded as never-indexed literal header fields.
"""
e = Encoder()
result = (b'\x82\x14\x88\x63\xa1\xa9' +
b'\x32\x08\x73\xd0\xc7\x10' +
b'\x87\x25\xa8\x49\xe9\xea' +
b'\x5f\x5f\x89\x41\x6a\x41' +
b'\x92\x6e\xe5\x35\x52\x9f')
header_set = [
(':method', 'GET', True),
(':path', '/jimiscool/', True),
('customkey', 'sensitiveinfo', True),
]
assert e.encode(header_set, huffman=True) == result
def test_non_sensitive_headers_with_header_tuples(self):
"""
A header field stored in a HeaderTuple emits a representation that
allows indexing.
"""
e = Encoder()
result = (b'\x82\x44\x88\x63\xa1\xa9' +
b'\x32\x08\x73\xd0\xc7\x40' +
b'\x87\x25\xa8\x49\xe9\xea' +
b'\x5f\x5f\x89\x41\x6a\x41' +
b'\x92\x6e\xe5\x35\x52\x9f')
header_set = [
HeaderTuple(':method', 'GET'),
HeaderTuple(':path', '/jimiscool/'),
HeaderTuple('customkey', 'sensitiveinfo'),
]
assert e.encode(header_set, huffman=True) == result
def test_sensitive_headers_with_header_tuples(self):
"""
A header field stored in a NeverIndexedHeaderTuple emits a
representation that forbids indexing.
"""
e = Encoder()
result = (b'\x82\x14\x88\x63\xa1\xa9' +
b'\x32\x08\x73\xd0\xc7\x10' +
b'\x87\x25\xa8\x49\xe9\xea' +
b'\x5f\x5f\x89\x41\x6a\x41' +
b'\x92\x6e\xe5\x35\x52\x9f')
header_set = [
NeverIndexedHeaderTuple(':method', 'GET'),
NeverIndexedHeaderTuple(':path', '/jimiscool/'),
NeverIndexedHeaderTuple('customkey', 'sensitiveinfo'),
]
assert e.encode(header_set, huffman=True) == result
def test_header_table_size_getter(self):
e = Encoder()
assert e.header_table_size == 4096
def test_indexed_literal_header_field_with_indexing(self):
"""
The header field representation uses an indexed name and a literal
value and performs incremental indexing.
"""
e = Encoder()
header_set = {':path': '/sample/path'}
result = b'\x44\x0c/sample/path'
assert e.encode(header_set, huffman=False) == result
assert list(e.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8'))
for n, v in header_set.items()
]
def test_indexed_header_field(self):
"""
The header field representation uses an indexed header field, from
the static table.
"""
e = Encoder()
header_set = {':method': 'GET'}
result = b'\x82'
assert e.encode(header_set, huffman=False) == result
assert list(e.header_table.dynamic_entries) == []
def test_indexed_header_field_from_static_table(self):
e = Encoder()
e.header_table_size = 0
header_set = {':method': 'GET'}
result = b'\x82'
# Make sure we don't emit an encoding context update.
e.header_table.resized = False
assert e.encode(header_set, huffman=False) == result
assert list(e.header_table.dynamic_entries) == []
def test_request_examples_without_huffman(self):
"""
This section shows several consecutive header sets, corresponding to
HTTP requests, on the same connection.
"""
e = Encoder()
first_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com'),
]
# We should have :authority in first_header_table since we index it
first_header_table = [(':authority', 'www.example.com')]
first_result = b'\x82\x86\x84\x41\x0fwww.example.com'
assert e.encode(first_header_set, huffman=False) == first_result
assert list(e.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8'))
for n, v in first_header_table
]
second_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com',),
('cache-control', 'no-cache'),
]
second_header_table = [
('cache-control', 'no-cache'),
(':authority', 'www.example.com')
]
second_result = b'\x82\x86\x84\xbeX\x08no-cache'
assert e.encode(second_header_set, huffman=False) == second_result
assert list(e.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8'))
for n, v in second_header_table
]
third_header_set = [
(':method', 'GET',),
(':scheme', 'https',),
(':path', '/index.html',),
(':authority', 'www.example.com',),
('custom-key', 'custom-value'),
]
third_result = (
b'\x82\x87\x85\xbf@\ncustom-key\x0ccustom-value'
)
assert e.encode(third_header_set, huffman=False) == third_result
# Don't check the header table here, it's just too complex to be
# reliable. Check its length though.
assert len(e.header_table.dynamic_entries) == 3
def test_request_examples_with_huffman(self):
"""
This section shows the same examples as the previous section, but
using Huffman encoding for the literal values.
"""
e = Encoder()
first_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com'),
]
first_header_table = [(':authority', 'www.example.com')]
first_result = (
b'\x82\x86\x84\x41\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
)
assert e.encode(first_header_set, huffman=True) == first_result
assert list(e.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8'))
for n, v in first_header_table
]
second_header_table = [
('cache-control', 'no-cache'),
(':authority', 'www.example.com')
]
second_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com',),
('cache-control', 'no-cache'),
]
second_result = b'\x82\x86\x84\xbeX\x86\xa8\xeb\x10d\x9c\xbf'
assert e.encode(second_header_set, huffman=True) == second_result
assert list(e.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8'))
for n, v in second_header_table
]
third_header_set = [
(':method', 'GET',),
(':scheme', 'https',),
(':path', '/index.html',),
(':authority', 'www.example.com',),
('custom-key', 'custom-value'),
]
third_result = (
b'\x82\x87\x85\xbf'
b'@\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8\xb4\xbf'
)
assert e.encode(third_header_set, huffman=True) == third_result
assert len(e.header_table.dynamic_entries) == 3
# These tests are custom, for hyper.
def test_resizing_header_table(self):
# We need to encode a substantial number of headers, to populate the
# header table.
e = Encoder()
header_set = [
(':method', 'GET'),
(':scheme', 'https'),
(':path', '/some/path'),
(':authority', 'www.example.com'),
('custom-key', 'custom-value'),
(
"user-agent",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) "
"Gecko/20100101 Firefox/16.0",
),
(
"accept",
"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;"
"q=0.8",
),
('X-Lukasa-Test', '88989'),
]
e.encode(header_set, huffman=True)
# Resize the header table to a size so small that nothing can be in it.
e.header_table_size = 40
assert len(e.header_table.dynamic_entries) == 0
def test_resizing_header_table_sends_multiple_updates(self):
e = Encoder()
e.header_table_size = 40
e.header_table_size = 100
e.header_table_size = 40
header_set = [(':method', 'GET')]
out = e.encode(header_set, huffman=True)
assert out == b'\x3F\x09\x3F\x45\x3F\x09\x82'
def test_resizing_header_table_to_same_size_ignored(self):
e = Encoder()
# These size changes should be ignored
e.header_table_size = 4096
e.header_table_size = 4096
e.header_table_size = 4096
# These size changes should be encoded
e.header_table_size = 40
e.header_table_size = 100
e.header_table_size = 40
header_set = [(':method', 'GET')]
out = e.encode(header_set, huffman=True)
assert out == b'\x3F\x09\x3F\x45\x3F\x09\x82'
def test_resizing_header_table_sends_context_update(self):
e = Encoder()
# Resize the header table to a size so small that nothing can be in it.
e.header_table_size = 40
# Now, encode a header set. Just a small one, with a well-defined
# output.
header_set = [(':method', 'GET')]
out = e.encode(header_set, huffman=True)
assert out == b'?\t\x82'
def test_setting_table_size_to_the_same_does_nothing(self):
e = Encoder()
# Set the header table size to the default.
e.header_table_size = 4096
# Now encode a header set. Just a small one, with a well-defined
# output.
header_set = [(':method', 'GET')]
out = e.encode(header_set, huffman=True)
assert out == b'\x82'
def test_evicting_header_table_objects(self):
e = Encoder()
# Set the header table size large enough to include one header.
e.header_table_size = 66
header_set = [('a', 'b'), ('long-custom-header', 'longish value')]
e.encode(header_set)
assert len(e.header_table.dynamic_entries) == 1
class TestHPACKDecoder(object):
# These tests are stolen entirely from the IETF specification examples.
def test_literal_header_field_with_indexing(self):
"""
The header field representation uses a literal name and a literal
value.
"""
d = Decoder()
header_set = [('custom-key', 'custom-header')]
data = b'\x40\x0acustom-key\x0dcustom-header'
assert d.decode(data) == header_set
assert list(d.header_table.dynamic_entries) == [
(n.encode('utf-8'), v.encode('utf-8')) for n, v in header_set
]
def test_raw_decoding(self):
"""
The header field representation is decoded as a raw byte string instead
of UTF-8
"""
d = Decoder()
header_set = [
(b'\x00\x01\x99\x30\x11\x22\x55\x21\x89\x14', b'custom-header')
]
data = (
b'\x40\x0a\x00\x01\x99\x30\x11\x22\x55\x21\x89\x14\x0d'
b'custom-header'
)
assert d.decode(data, raw=True) == header_set
def test_literal_header_field_without_indexing(self):
"""
The header field representation uses an indexed name and a literal
value.
"""
d = Decoder()
header_set = [(':path', '/sample/path')]
data = b'\x04\x0c/sample/path'
assert d.decode(data) == header_set
assert list(d.header_table.dynamic_entries) == []
def test_header_table_size_getter(self):
d = Decoder()
assert d.header_table_size
def test_indexed_header_field(self):
"""
The header field representation uses an indexed header field, from
the static table.
"""
d = Decoder()
header_set = [(':method', 'GET')]
data = b'\x82'
assert d.decode(data) == header_set
assert list(d.header_table.dynamic_entries) == []
def test_request_examples_without_huffman(self):
"""
This section shows several consecutive header sets, corresponding to
HTTP requests, on the same connection.
"""
d = Decoder()
first_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com'),
]
# The first_header_table doesn't contain 'authority'
first_data = b'\x82\x86\x84\x01\x0fwww.example.com'
assert d.decode(first_data) == first_header_set
assert list(d.header_table.dynamic_entries) == []
# This request takes advantage of the differential encoding of header
# sets.
second_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com',),
('cache-control', 'no-cache'),
]
second_data = (
b'\x82\x86\x84\x01\x0fwww.example.com\x0f\t\x08no-cache'
)
assert d.decode(second_data) == second_header_set
assert list(d.header_table.dynamic_entries) == []
third_header_set = [
(':method', 'GET',),
(':scheme', 'https',),
(':path', '/index.html',),
(':authority', 'www.example.com',),
('custom-key', 'custom-value'),
]
third_data = (
b'\x82\x87\x85\x01\x0fwww.example.com@\ncustom-key\x0ccustom-value'
)
assert d.decode(third_data) == third_header_set
# Don't check the header table here, it's just too complex to be
# reliable. Check its length though.
assert len(d.header_table.dynamic_entries) == 1
def test_request_examples_with_huffman(self):
"""
This section shows the same examples as the previous section, but
using Huffman encoding for the literal values.
"""
d = Decoder()
first_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com'),
]
first_data = (
b'\x82\x86\x84\x01\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
)
assert d.decode(first_data) == first_header_set
assert list(d.header_table.dynamic_entries) == []
second_header_set = [
(':method', 'GET',),
(':scheme', 'http',),
(':path', '/',),
(':authority', 'www.example.com',),
('cache-control', 'no-cache'),
]
second_data = (
b'\x82\x86\x84\x01\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
b'\x0f\t\x86\xa8\xeb\x10d\x9c\xbf'
)
assert d.decode(second_data) == second_header_set
assert list(d.header_table.dynamic_entries) == []
third_header_set = [
(':method', 'GET',),
(':scheme', 'https',),
(':path', '/index.html',),
(':authority', 'www.example.com',),
('custom-key', 'custom-value'),
]
third_data = (
b'\x82\x87\x85\x01\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff@'
b'\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8\xb4\xbf'
)
assert d.decode(third_data) == third_header_set
assert len(d.header_table.dynamic_entries) == 1
# These tests are custom, for hyper.
def test_resizing_header_table(self):
# We need to decode a substantial number of headers, to populate the
# header table. This string isn't magic: it's the output from the
# equivalent test for the Encoder.
d = Decoder()
data = (
b'\x82\x87D\x87a\x07\xa4\xacV4\xcfA\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0'
b'\xab\x90\xf4\xff@\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8'
b'\xb4\xbfz\xbc\xd0\x7ff\xa2\x81\xb0\xda\xe0S\xfa\xd02\x1a\xa4\x9d'
b'\x13\xfd\xa9\x92\xa4\x96\x854\x0c\x8aj\xdc\xa7\xe2\x81\x02\xef}'
b'\xa9g{\x81qp\x7fjb):\x9d\x81\x00 \x00@\x150\x9a\xc2\xca\x7f,\x05'
b'\xc5\xc1S\xb0I|\xa5\x89\xd3M\x1fC\xae\xba\x0cA\xa4\xc7\xa9\x8f3'
b'\xa6\x9a?\xdf\x9ah\xfa\x1du\xd0b\r&=Ly\xa6\x8f\xbe\xd0\x01w\xfe'
b'\xbeX\xf9\xfb\xed\x00\x17{@\x8a\xfc[=\xbdF\x81\xad\xbc\xa8O\x84y'
b'\xe7\xde\x7f'
)
d.decode(data)
# Resize the header table to a size so small that nothing can be in it.
d.header_table_size = 40
assert len(d.header_table.dynamic_entries) == 0
def test_apache_trafficserver(self):
# This test reproduces the bug in #110, using exactly the same header
# data.
d = Decoder()
data = (
b'\x10\x07:status\x03200@\x06server\tATS/6.0.0'
b'@\x04date\x1dTue, 31 Mar 2015 08:09:51 GMT'
b'@\x0ccontent-type\ttext/html@\x0econtent-length\x0542468'
b'@\rlast-modified\x1dTue, 31 Mar 2015 01:55:51 GMT'
b'@\x04vary\x0fAccept-Encoding@\x04etag\x0f"5519fea7-a5e4"'
b'@\x08x-served\x05Nginx@\x14x-subdomain-tryfiles\x04True'
b'@\x07x-deity\thydra-lts@\raccept-ranges\x05bytes@\x03age\x010'
b'@\x19strict-transport-security\rmax-age=86400'
b'@\x03via2https/1.1 ATS (ApacheTrafficServer/6.0.0 [cSsNfU])'
)
expect = [
(':status', '200'),
('server', 'ATS/6.0.0'),
('date', 'Tue, 31 Mar 2015 08:09:51 GMT'),
('content-type', 'text/html'),
('content-length', '42468'),
('last-modified', 'Tue, 31 Mar 2015 01:55:51 GMT'),
('vary', 'Accept-Encoding'),
('etag', '"5519fea7-a5e4"'),
('x-served', 'Nginx'),
('x-subdomain-tryfiles', 'True'),
('x-deity', 'hydra-lts'),
('accept-ranges', 'bytes'),
('age', '0'),
('strict-transport-security', 'max-age=86400'),
('via', 'https/1.1 ATS (ApacheTrafficServer/6.0.0 [cSsNfU])'),
]
result = d.decode(data)
assert result == expect
# The status header shouldn't be indexed.
assert len(d.header_table.dynamic_entries) == len(expect) - 1
def test_utf8_errors_raise_hpack_decoding_error(self):
d = Decoder()
# Invalid UTF-8 data.
data = b'\x82\x86\x84\x01\x10www.\x07\xaa\xd7\x95\xd7\xa8\xd7\x94.com'
with pytest.raises(HPACKDecodingError):
d.decode(data)
def test_invalid_indexed_literal(self):
d = Decoder()
# Refer to an index that is too large.
data = b'\x82\x86\x84\x7f\x0a\x0fwww.example.com'
with pytest.raises(InvalidTableIndex):
d.decode(data)
def test_invalid_indexed_header(self):
d = Decoder()
# Refer to an indexed header that is too large.
data = b'\xBE\x86\x84\x01\x0fwww.example.com'
with pytest.raises(InvalidTableIndex):
d.decode(data)
def test_literal_header_field_with_indexing_emits_headertuple(self):
"""
A header field with indexing emits a HeaderTuple.
"""
d = Decoder()
data = b'\x00\x0acustom-key\x0dcustom-header'
headers = d.decode(data)
assert len(headers) == 1
header = headers[0]
assert isinstance(header, HeaderTuple)
assert not isinstance(header, NeverIndexedHeaderTuple)
def test_literal_never_indexed_emits_neverindexedheadertuple(self):
"""
A literal header field that must never be indexed emits a
NeverIndexedHeaderTuple.
"""
d = Decoder()
data = b'\x10\x0acustom-key\x0dcustom-header'
headers = d.decode(data)
assert len(headers) == 1
header = headers[0]
assert isinstance(header, NeverIndexedHeaderTuple)
def test_indexed_never_indexed_emits_neverindexedheadertuple(self):
"""
A header field with an indexed name that must never be indexed emits a
NeverIndexedHeaderTuple.
"""
d = Decoder()
data = b'\x14\x0c/sample/path'
headers = d.decode(data)
assert len(headers) == 1
header = headers[0]
assert isinstance(header, NeverIndexedHeaderTuple)
def test_max_header_list_size(self):
"""
If the header block is larger than the max_header_list_size, the HPACK
decoder throws an OversizedHeaderListError.
"""
d = Decoder(max_header_list_size=44)
data = b'\x14\x0c/sample/path'
with pytest.raises(OversizedHeaderListError):
d.decode(data)
def test_can_decode_multiple_header_table_size_changes(self):
"""
If multiple header table size changes are sent in at once, they are
successfully decoded.
"""
d = Decoder()
data = b'?a?\xe1\x1f\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
expect = [
(':method', 'GET'),
(':scheme', 'https'),
(':path', '/'),
(':authority', '127.0.0.1:8443')
]
assert d.decode(data) == expect
def test_header_table_size_change_above_maximum(self):
"""
If a header table size change is received that exceeds the maximum
allowed table size, it is rejected.
"""
d = Decoder()
d.max_allowed_table_size = 127
data = b'?a\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
with pytest.raises(InvalidTableSizeError):
d.decode(data)
def test_table_size_not_adjusting(self):
"""
        If the decoder's allowed header table size is shrunk and the remote
        peer never acknowledges it with a table size update, an error is raised.
"""
d = Decoder()
d.max_allowed_table_size = 128
data = b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
with pytest.raises(InvalidTableSizeError):
d.decode(data)
def test_table_size_last_rejected(self):
"""
If a header table size change comes last in the header block, it is
forbidden.
"""
d = Decoder()
data = b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99?a'
with pytest.raises(HPACKDecodingError):
d.decode(data)
def test_table_size_middle_rejected(self):
"""
If a header table size change comes anywhere but first in the header
block, it is forbidden.
"""
d = Decoder()
data = b'\x82?a\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
with pytest.raises(HPACKDecodingError):
d.decode(data)
def test_truncated_header_name(self):
"""
If a header name is truncated an error is raised.
"""
d = Decoder()
# This is a simple header block that has a bad ending. The interesting
# part begins on the second line. This indicates a string that has
        # literal name and value. The name is declared as a 5-byte
        # huffman-encoded string, but only three of those bytes are present.
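        # (Added note: b'\x85' is the huffman flag 0x80 plus a declared
        # length of 5, yet only the three bytes f2 b2 4a follow before the
        # block ends.)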
data = (
b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
b'\x00\x85\xf2\xb2J'
)
with pytest.raises(HPACKDecodingError):
d.decode(data)
def test_truncated_header_value(self):
"""
If a header value is truncated an error is raised.
"""
d = Decoder()
# This is a simple header block that has a bad ending. The interesting
# part begins on the second line. This indicates a string that has
        # literal name and value. The name is a 5-byte huffman-encoded
        # string, but the entire EOS character has been written over its
        # end. This makes hpack believe the header value is 622462 bytes
        # long, which it clearly is not, so decoding must fail.
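        # (Added arithmetic: the value length is read from the 7-bit-prefix
        # integer in b'\xff\xff\xfd%' as 127 + 127 + 125*2**7 + 37*2**14
        # = 622462.)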
data = (
b'\x82\x87\x84A\x8a\x08\x9d\\\x0b\x81p\xdcy\xa6\x99'
b'\x00\x85\xf2\xb2J\x87\xff\xff\xff\xfd%B\x7f'
)
with pytest.raises(HPACKDecodingError):
d.decode(data)
class TestDictToIterable(object):
"""
    The dict_to_iterable function has some subtle requirements: this suite
    validates that everything behaves as expected.
    As much as possible it tries to be exhaustive.
"""
keys = one_of(
text().filter(lambda k: k and not k.startswith(u':')),
binary().filter(lambda k: k and not k.startswith(b':'))
)
@given(
special_keys=sets(keys),
boring_keys=sets(keys),
)
def test_ordering(self, special_keys, boring_keys):
"""
_dict_to_iterable produces an iterable where all the keys beginning
with a colon are emitted first.
"""
def _prepend_colon(k):
if isinstance(k, unicode):
return u':' + k
else:
return b':' + k
special_keys = set(map(_prepend_colon, special_keys))
input_dict = {
k: b'testval' for k in itertools.chain(
special_keys,
boring_keys
)
}
filtered = _dict_to_iterable(input_dict)
received_special = set()
received_boring = set()
for _ in special_keys:
k, _ = next(filtered)
received_special.add(k)
for _ in boring_keys:
k, _ = next(filtered)
received_boring.add(k)
assert special_keys == received_special
assert boring_keys == received_boring
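    # (Added sketch, not part of the original suite.) One concrete
    # instance of the ordering contract tested above, assuming the same
    # _dict_to_iterable helper imported by this module.
    def sketch_special_keys_come_first(self):
        items = list(_dict_to_iterable(
            {b'foo': b'1', b':path': b'/', b'bar': b'2'}
        ))
        assert items[0][0] == b':path'  # the lone colon-prefixed key leads
        assert {k for k, v in items[1:]} == {b'foo', b'bar'}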
@given(
special_keys=sets(keys),
boring_keys=sets(keys),
)
def test_ordering_applies_to_encoding(self, special_keys, boring_keys):
"""
When encoding a dictionary the special keys all appear first.
"""
def _prepend_colon(k):
if isinstance(k, unicode):
return u':' + k
else:
return b':' + k
special_keys = set(map(_prepend_colon, special_keys))
input_dict = {
k: b'testval' for k in itertools.chain(
special_keys,
boring_keys
)
}
e = Encoder()
d = Decoder()
encoded = e.encode(input_dict)
decoded = iter(d.decode(encoded, raw=True))
received_special = set()
received_boring = set()
expected_special = set(map(_to_bytes, special_keys))
expected_boring = set(map(_to_bytes, boring_keys))
for _ in special_keys:
k, _ = next(decoded)
received_special.add(k)
for _ in boring_keys:
k, _ = next(decoded)
received_boring.add(k)
assert expected_special == received_special
assert expected_boring == received_boring
| mpl-2.0 | 2,928,928,070,157,458,000 | 33.355072 | 79 | 0.549181 | false |
kenCode-de/bitshares-wallet | dev/app/dl/programs/bloom.py | 17 | 1188 | #!/usr/bin/env python3
# Matching example:
# ./bloom.py BTS87mopaNqLDjT1BvzqQR3QjWzWSTgkWnMcwt5sqxHuavCBi1s3m
# Sample program by theoriticalbts, used to create the js implementation
import hashlib
import sys
k = sys.argv[1]
# specified with -s parameter
size = 8388608
# specified with -n parameter
hashes = 3
with open("bloom.dat", "rb") as f:
filter_result = True
for i in range(hashes):
x = (str(i)+":"+k).encode("UTF-8")
print("getting hash of ", repr(x))
hash = hashlib.sha256(x).hexdigest()
print("hash value is ", repr(hash))
bit_address = int(hash, 16) % size
print("bit address is", hex(bit_address))
byte_address = bit_address >> 3
print("byte address is", hex(byte_address))
mask = 1 << (bit_address & 7)
print("mask is", hex(mask))
f.seek(byte_address)
b = f.read(1)
print("byte is", hex(b[0]))
# print("b[0] & mask", b[0] & mask)
ok = (b[0] & mask)
print("ok", ok, ok == 0)
if ok == 0:
filter_result = False
print("returning False result")
break
print("filter_result is", filter_result)
| gpl-3.0 | 8,034,388,661,803,937,000 | 26.627907 | 67 | 0.578283 | false |
uniite/imd | src v1/accel-old.py | 1 | 1757 | def check_input ():
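    # (Added description) Poll the accelerometer over the serial port:
    # average a burst of X then Y readings (parsed with eval from the tty
    # output), compare them with the running Xavg/Yavg baselines, and fire
    # menu_back/menu_action (X tilt) or menu_up/menu_down (Y tilt) once a
    # reading leaves the +/-15 dead band; otherwise fold the sample back
    # into the baseline. Assumes at least one in-range sample per burst,
    # since avg / c would otherwise divide by zero.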
global X
global Y
global Xavg
global Yavg
global Xavgc
global Yavgc
c = 0
avg = 0
for i in range(0, 15):
#tty.write("x")
X = eval(tty.read(30)[4:9])
if X < 700 and X > 300:
avg += X
c += 1
print "XAvg: %s" % (avg / c)
X = avg / c
print "Xavg: %s" % (Xavg / Xavgc)
if X > (Xavg / Xavgc) + 15:
menu_back()
sleep(.4)
Yavg += Y
Yavgc += 1
return
elif X < (Xavg / Xavgc) - 15:
menu_action()
sleep(.4)
Yavg += Y
Yavgc += 1
return
else:
Xavg += X * 2
Xavgc += 2
c = 0
avg = 0
for i in range(0, 5):
#tty.write("y")
Y = eval(tty.read(30)[14:19])
if Y < 700 and Y > 300:
avg += Y
c += 1
Y = avg / c
print "YAvg: %s" % (avg / c)
print "Yavg: %s" % (Yavg / Yavgc)
if Y < (Yavg / Yavgc) - 15:
menu_down()
elif Y > (Yavg / Yavgc) + 15:
menu_up()
else:
Yavg += Y * 2
Yavgc += 2
# Calibrate the accelerometer (establish the resting X/Y averages)
def calibrate ():
global X
global Y
global Xavg
global Yavg
global Xavgc
global Yavgc
Xavg = 0
Yavg = 0
Xavgc = 0
Yavgc = 0
i = 5
while i > -1:
oled.clear()
oled.flush()
oled.textbox(x = 40, y = 50, text = "Calibrating in %s" % i)
oled.flush()
oled.redraw()
oled.flush()
sleep(1)
i -= 1
i = 250
i2 = 0
while i > 0:
tty.write("x")
s = tty.read(9)
X = eval(s[4:])
if X < 700 and X > 300:
Xavg += X
Xavgc += 1
print "Xavg: " + str(Xavg / Xavgc)
tty.write("y")
s = tty.read(9)
Y = eval(s[4:])
if Y < 700 and Y > 300:
Yavg += Y
Yavgc += 1
print "Yavg: " + str(Yavg / Yavgc)
if i2 == 30:
oled.clear()
oled.flush()
oled.textbox(x = 35, y = 50, text = "%s percent done..." % ((250 - i) / 2.5))
oled.flush()
oled.redraw()
oled.flush()
i2 = 0
sleep(0.02)
i2 += 1
i -= 1
menu_home() | mit | -6,176,184,541,109,291,000 | 16.067961 | 80 | 0.525896 | false |
sanguinariojoe/FreeCAD | src/Mod/Fem/femexamples/meshes/mesh_square_pipe_end_twisted_tria6.py | 16 | 130810 | def create_nodes(femmesh):
# nodes
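    # (Added description) Auto-generated data: registers every node of
    # the tria6 surface mesh via femmesh.addNode(x, y, z, id).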
femmesh.addNode(-142.5, 142.5, 0.0, 1)
femmesh.addNode(-142.5, -142.5, 0.0, 2)
femmesh.addNode(-142.5, 142.5, 1000.0, 3)
femmesh.addNode(-142.5, -142.5, 1000.0, 4)
femmesh.addNode(-142.5, 95.0, 0.0, 5)
femmesh.addNode(-142.5, 47.5, 0.0, 6)
femmesh.addNode(-142.5, 0.0, 0.0, 7)
femmesh.addNode(-142.5, -47.5, 0.0, 8)
femmesh.addNode(-142.5, -95.0, 0.0, 9)
femmesh.addNode(-142.5, 142.5, 50.0, 10)
femmesh.addNode(-142.5, 142.5, 100.0, 11)
femmesh.addNode(-142.5, 142.5, 150.0, 12)
femmesh.addNode(-142.5, 142.5, 200.0, 13)
femmesh.addNode(-142.5, 142.5, 250.0, 14)
femmesh.addNode(-142.5, 142.5, 300.0, 15)
femmesh.addNode(-142.5, 142.5, 350.0, 16)
femmesh.addNode(-142.5, 142.5, 400.0, 17)
femmesh.addNode(-142.5, 142.5, 450.0, 18)
femmesh.addNode(-142.5, 142.5, 500.0, 19)
femmesh.addNode(-142.5, 142.5, 550.0, 20)
femmesh.addNode(-142.5, 142.5, 600.0, 21)
femmesh.addNode(-142.5, 142.5, 650.0, 22)
femmesh.addNode(-142.5, 142.5, 700.0, 23)
femmesh.addNode(-142.5, 142.5, 750.0, 24)
femmesh.addNode(-142.5, 142.5, 800.0, 25)
femmesh.addNode(-142.5, 142.5, 850.0, 26)
femmesh.addNode(-142.5, 142.5, 900.0, 27)
femmesh.addNode(-142.5, 142.5, 950.0, 28)
femmesh.addNode(-142.5, 95.0, 1000.0, 29)
femmesh.addNode(-142.5, 47.5, 1000.0, 30)
femmesh.addNode(-142.5, 0.0, 1000.0, 31)
femmesh.addNode(-142.5, -47.5, 1000.0, 32)
femmesh.addNode(-142.5, -95.0, 1000.0, 33)
femmesh.addNode(-142.5, -142.5, 50.0, 34)
femmesh.addNode(-142.5, -142.5, 100.0, 35)
femmesh.addNode(-142.5, -142.5, 150.0, 36)
femmesh.addNode(-142.5, -142.5, 200.0, 37)
femmesh.addNode(-142.5, -142.5, 250.0, 38)
femmesh.addNode(-142.5, -142.5, 300.0, 39)
femmesh.addNode(-142.5, -142.5, 350.0, 40)
femmesh.addNode(-142.5, -142.5, 400.0, 41)
femmesh.addNode(-142.5, -142.5, 450.0, 42)
femmesh.addNode(-142.5, -142.5, 500.0, 43)
femmesh.addNode(-142.5, -142.5, 550.0, 44)
femmesh.addNode(-142.5, -142.5, 600.0, 45)
femmesh.addNode(-142.5, -142.5, 650.0, 46)
femmesh.addNode(-142.5, -142.5, 700.0, 47)
femmesh.addNode(-142.5, -142.5, 750.0, 48)
femmesh.addNode(-142.5, -142.5, 800.0, 49)
femmesh.addNode(-142.5, -142.5, 850.0, 50)
femmesh.addNode(-142.5, -142.5, 900.0, 51)
femmesh.addNode(-142.5, -142.5, 950.0, 52)
femmesh.addNode(-142.5, 78.2801, 37.6071, 53)
femmesh.addNode(-142.5, 82.8778, 81.1309, 54)
femmesh.addNode(-142.5, 83.7987, 132.243, 55)
femmesh.addNode(-142.5, 26.5975, 51.4008, 56)
femmesh.addNode(-142.5, -24.9195, 38.6958, 57)
femmesh.addNode(-142.5, 28.3071, 109.277, 58)
femmesh.addNode(-142.5, 28.6437, 161.076, 59)
femmesh.addNode(-142.5, -25.856, 84.5406, 60)
femmesh.addNode(-142.5, -26.1584, 137.428, 61)
femmesh.addNode(-142.5, 84.3969, 181.746, 62)
femmesh.addNode(-142.5, 84.2758, 232.086, 63)
femmesh.addNode(-142.5, 84.2827, 282.032, 64)
femmesh.addNode(-142.5, 84.1337, 332.651, 65)
femmesh.addNode(-142.5, 29.2616, 210.927, 66)
femmesh.addNode(-142.5, 29.3967, 260.858, 67)
femmesh.addNode(-142.5, -26.0842, 188.506, 68)
femmesh.addNode(-142.5, -25.9315, 238.681, 69)
femmesh.addNode(-142.5, 29.1757, 311.391, 70)
femmesh.addNode(-142.5, 28.7076, 361.897, 71)
femmesh.addNode(-142.5, -25.946, 288.852, 72)
femmesh.addNode(-142.5, -26.2889, 339.654, 73)
femmesh.addNode(-142.5, -79.9661, 55.4097, 74)
femmesh.addNode(-142.5, -83.1462, 115.644, 75)
femmesh.addNode(-142.5, -83.7177, 168.069, 76)
femmesh.addNode(-142.5, -83.7677, 218.68, 77)
femmesh.addNode(-142.5, -83.7551, 268.845, 78)
femmesh.addNode(-142.5, -83.8444, 319.301, 79)
femmesh.addNode(-142.5, -83.832, 369.493, 80)
femmesh.addNode(-142.5, 84.4685, 381.956, 81)
femmesh.addNode(-142.5, 84.2929, 432.126, 82)
femmesh.addNode(-142.5, 84.2867, 482.039, 83)
femmesh.addNode(-142.5, 84.1346, 532.653, 84)
femmesh.addNode(-142.5, 29.0852, 411.479, 85)
femmesh.addNode(-142.5, 29.3382, 461.022, 86)
femmesh.addNode(-142.5, -26.5143, 389.778, 87)
femmesh.addNode(-142.5, -26.0976, 439.168, 88)
femmesh.addNode(-142.5, 29.1615, 511.431, 89)
femmesh.addNode(-142.5, 28.887, 561.607, 90)
femmesh.addNode(-142.5, -26.0013, 489.005, 91)
femmesh.addNode(-142.5, -26.0668, 539.096, 92)
femmesh.addNode(-142.5, 84.4986, 581.908, 93)
femmesh.addNode(-142.5, 84.2979, 632.118, 94)
femmesh.addNode(-142.5, 84.2875, 682.038, 95)
femmesh.addNode(-142.5, 84.1347, 732.652, 96)
femmesh.addNode(-142.5, 29.3337, 611.072, 97)
femmesh.addNode(-142.5, 29.4161, 660.894, 98)
femmesh.addNode(-142.5, -26.0193, 588.998, 99)
femmesh.addNode(-142.5, -25.9036, 638.814, 100)
femmesh.addNode(-142.5, 29.1806, 711.4, 101)
femmesh.addNode(-142.5, 28.7088, 761.899, 102)
femmesh.addNode(-142.5, -25.9364, 688.886, 103)
femmesh.addNode(-142.5, -26.286, 739.662, 104)
femmesh.addNode(-142.5, -83.898, 419.24, 105)
femmesh.addNode(-142.5, -83.8166, 469.052, 106)
femmesh.addNode(-142.5, -83.8027, 519.019, 107)
femmesh.addNode(-142.5, -83.8003, 569.008, 108)
femmesh.addNode(-142.5, -83.7654, 618.944, 109)
femmesh.addNode(-142.5, -83.7483, 668.918, 110)
femmesh.addNode(-142.5, -83.8412, 719.32, 111)
femmesh.addNode(-142.5, -83.8309, 769.498, 112)
femmesh.addNode(-142.5, 84.4689, 781.957, 113)
femmesh.addNode(-142.5, 84.293, 832.127, 114)
femmesh.addNode(-142.5, 83.9065, 883.976, 115)
femmesh.addNode(-142.5, 80.6013, 944.348, 116)
femmesh.addNode(-142.5, 29.0855, 811.479, 117)
femmesh.addNode(-142.5, 29.2116, 861.667, 118)
femmesh.addNode(-142.5, -26.5134, 789.781, 119)
femmesh.addNode(-142.5, -26.129, 839.33, 120)
femmesh.addNode(-142.5, 28.4518, 914.46, 121)
femmesh.addNode(-142.5, 26.4487, 961.198, 122)
femmesh.addNode(-142.5, -26.0999, 891.348, 123)
femmesh.addNode(-142.5, -25.5438, 949.788, 124)
femmesh.addNode(-142.5, -83.9044, 819.276, 125)
femmesh.addNode(-142.5, -83.834, 869.725, 126)
femmesh.addNode(-142.5, -83.119, 920.663, 127)
femmesh.addNode(-142.5, -78.7325, 964.09, 128)
femmesh.addNode(142.5, -142.5, 0.0, 129)
femmesh.addNode(142.5, -142.5, 1000.0, 130)
femmesh.addNode(-95.0, -142.5, 0.0, 131)
femmesh.addNode(-47.5, -142.5, 0.0, 132)
femmesh.addNode(0.0, -142.5, 0.0, 133)
femmesh.addNode(47.5, -142.5, 0.0, 134)
femmesh.addNode(95.0, -142.5, 0.0, 135)
femmesh.addNode(-95.0, -142.5, 1000.0, 136)
femmesh.addNode(-47.5, -142.5, 1000.0, 137)
femmesh.addNode(0.0, -142.5, 1000.0, 138)
femmesh.addNode(47.5, -142.5, 1000.0, 139)
femmesh.addNode(95.0, -142.5, 1000.0, 140)
femmesh.addNode(142.5, -142.5, 50.0, 141)
femmesh.addNode(142.5, -142.5, 100.0, 142)
femmesh.addNode(142.5, -142.5, 150.0, 143)
femmesh.addNode(142.5, -142.5, 200.0, 144)
femmesh.addNode(142.5, -142.5, 250.0, 145)
femmesh.addNode(142.5, -142.5, 300.0, 146)
femmesh.addNode(142.5, -142.5, 350.0, 147)
femmesh.addNode(142.5, -142.5, 400.0, 148)
femmesh.addNode(142.5, -142.5, 450.0, 149)
femmesh.addNode(142.5, -142.5, 500.0, 150)
femmesh.addNode(142.5, -142.5, 550.0, 151)
femmesh.addNode(142.5, -142.5, 600.0, 152)
femmesh.addNode(142.5, -142.5, 650.0, 153)
femmesh.addNode(142.5, -142.5, 700.0, 154)
femmesh.addNode(142.5, -142.5, 750.0, 155)
femmesh.addNode(142.5, -142.5, 800.0, 156)
femmesh.addNode(142.5, -142.5, 850.0, 157)
femmesh.addNode(142.5, -142.5, 900.0, 158)
femmesh.addNode(142.5, -142.5, 950.0, 159)
femmesh.addNode(-78.2801, -142.5, 37.6071, 160)
femmesh.addNode(-82.8778, -142.5, 81.1309, 161)
femmesh.addNode(-83.7987, -142.5, 132.243, 162)
femmesh.addNode(-26.5975, -142.5, 51.4008, 163)
femmesh.addNode(24.9195, -142.5, 38.6958, 164)
femmesh.addNode(-28.3071, -142.5, 109.277, 165)
femmesh.addNode(-28.6437, -142.5, 161.076, 166)
femmesh.addNode(25.856, -142.5, 84.5406, 167)
femmesh.addNode(26.1584, -142.5, 137.428, 168)
femmesh.addNode(-84.3969, -142.5, 181.746, 169)
femmesh.addNode(-84.2758, -142.5, 232.086, 170)
femmesh.addNode(-84.2827, -142.5, 282.032, 171)
femmesh.addNode(-84.1337, -142.5, 332.651, 172)
femmesh.addNode(-29.2616, -142.5, 210.927, 173)
femmesh.addNode(-29.3967, -142.5, 260.858, 174)
femmesh.addNode(26.0842, -142.5, 188.506, 175)
femmesh.addNode(25.9315, -142.5, 238.681, 176)
femmesh.addNode(-29.1757, -142.5, 311.391, 177)
femmesh.addNode(-28.7076, -142.5, 361.897, 178)
femmesh.addNode(25.946, -142.5, 288.852, 179)
femmesh.addNode(26.2889, -142.5, 339.654, 180)
femmesh.addNode(79.9661, -142.5, 55.4097, 181)
femmesh.addNode(83.1462, -142.5, 115.644, 182)
femmesh.addNode(83.7177, -142.5, 168.069, 183)
femmesh.addNode(83.7677, -142.5, 218.68, 184)
femmesh.addNode(83.7551, -142.5, 268.845, 185)
femmesh.addNode(83.8444, -142.5, 319.301, 186)
femmesh.addNode(83.832, -142.5, 369.493, 187)
femmesh.addNode(-84.4685, -142.5, 381.956, 188)
femmesh.addNode(-84.2929, -142.5, 432.126, 189)
femmesh.addNode(-84.2867, -142.5, 482.039, 190)
femmesh.addNode(-84.1346, -142.5, 532.653, 191)
femmesh.addNode(-29.0852, -142.5, 411.479, 192)
femmesh.addNode(-29.3382, -142.5, 461.022, 193)
femmesh.addNode(26.5143, -142.5, 389.778, 194)
femmesh.addNode(26.0976, -142.5, 439.168, 195)
femmesh.addNode(-29.1615, -142.5, 511.431, 196)
femmesh.addNode(-28.887, -142.5, 561.607, 197)
femmesh.addNode(26.0013, -142.5, 489.005, 198)
femmesh.addNode(26.0668, -142.5, 539.096, 199)
femmesh.addNode(-84.4986, -142.5, 581.908, 200)
femmesh.addNode(-84.2979, -142.5, 632.118, 201)
femmesh.addNode(-84.2875, -142.5, 682.038, 202)
femmesh.addNode(-84.1347, -142.5, 732.652, 203)
femmesh.addNode(-29.3337, -142.5, 611.072, 204)
femmesh.addNode(-29.4161, -142.5, 660.894, 205)
femmesh.addNode(26.0193, -142.5, 588.998, 206)
femmesh.addNode(25.9036, -142.5, 638.814, 207)
femmesh.addNode(-29.1806, -142.5, 711.4, 208)
femmesh.addNode(-28.7088, -142.5, 761.899, 209)
femmesh.addNode(25.9364, -142.5, 688.886, 210)
femmesh.addNode(26.286, -142.5, 739.662, 211)
femmesh.addNode(83.898, -142.5, 419.24, 212)
femmesh.addNode(83.8166, -142.5, 469.052, 213)
femmesh.addNode(83.8027, -142.5, 519.019, 214)
femmesh.addNode(83.8003, -142.5, 569.008, 215)
femmesh.addNode(83.7654, -142.5, 618.944, 216)
femmesh.addNode(83.7483, -142.5, 668.918, 217)
femmesh.addNode(83.8412, -142.5, 719.32, 218)
femmesh.addNode(83.8309, -142.5, 769.498, 219)
femmesh.addNode(-84.4689, -142.5, 781.957, 220)
femmesh.addNode(-84.293, -142.5, 832.127, 221)
femmesh.addNode(-83.9065, -142.5, 883.976, 222)
femmesh.addNode(-80.6013, -142.5, 944.348, 223)
femmesh.addNode(-29.0855, -142.5, 811.479, 224)
femmesh.addNode(-29.2116, -142.5, 861.667, 225)
femmesh.addNode(26.5134, -142.5, 789.781, 226)
femmesh.addNode(26.129, -142.5, 839.33, 227)
femmesh.addNode(-28.4518, -142.5, 914.46, 228)
femmesh.addNode(-26.4487, -142.5, 961.198, 229)
femmesh.addNode(26.0999, -142.5, 891.348, 230)
femmesh.addNode(25.5438, -142.5, 949.788, 231)
femmesh.addNode(83.9044, -142.5, 819.276, 232)
femmesh.addNode(83.834, -142.5, 869.725, 233)
femmesh.addNode(83.119, -142.5, 920.663, 234)
femmesh.addNode(78.7325, -142.5, 964.09, 235)
femmesh.addNode(142.5, 142.5, 0.0, 236)
femmesh.addNode(142.5, 142.5, 1000.0, 237)
femmesh.addNode(142.5, -95.0, 0.0, 238)
femmesh.addNode(142.5, -47.5, 0.0, 239)
femmesh.addNode(142.5, 0.0, 0.0, 240)
femmesh.addNode(142.5, 47.5, 0.0, 241)
femmesh.addNode(142.5, 95.0, 0.0, 242)
femmesh.addNode(142.5, -95.0, 1000.0, 243)
femmesh.addNode(142.5, -47.5, 1000.0, 244)
femmesh.addNode(142.5, 0.0, 1000.0, 245)
femmesh.addNode(142.5, 47.5, 1000.0, 246)
femmesh.addNode(142.5, 95.0, 1000.0, 247)
femmesh.addNode(142.5, 142.5, 50.0, 248)
femmesh.addNode(142.5, 142.5, 100.0, 249)
femmesh.addNode(142.5, 142.5, 150.0, 250)
femmesh.addNode(142.5, 142.5, 200.0, 251)
femmesh.addNode(142.5, 142.5, 250.0, 252)
femmesh.addNode(142.5, 142.5, 300.0, 253)
femmesh.addNode(142.5, 142.5, 350.0, 254)
femmesh.addNode(142.5, 142.5, 400.0, 255)
femmesh.addNode(142.5, 142.5, 450.0, 256)
femmesh.addNode(142.5, 142.5, 500.0, 257)
femmesh.addNode(142.5, 142.5, 550.0, 258)
femmesh.addNode(142.5, 142.5, 600.0, 259)
femmesh.addNode(142.5, 142.5, 650.0, 260)
femmesh.addNode(142.5, 142.5, 700.0, 261)
femmesh.addNode(142.5, 142.5, 750.0, 262)
femmesh.addNode(142.5, 142.5, 800.0, 263)
femmesh.addNode(142.5, 142.5, 850.0, 264)
femmesh.addNode(142.5, 142.5, 900.0, 265)
femmesh.addNode(142.5, 142.5, 950.0, 266)
femmesh.addNode(142.5, -78.2801, 37.6071, 267)
femmesh.addNode(142.5, -82.8778, 81.1309, 268)
femmesh.addNode(142.5, -83.7987, 132.243, 269)
femmesh.addNode(142.5, -26.5975, 51.4008, 270)
femmesh.addNode(142.5, 24.9195, 38.6958, 271)
femmesh.addNode(142.5, -28.3071, 109.277, 272)
femmesh.addNode(142.5, -28.6437, 161.076, 273)
femmesh.addNode(142.5, 25.856, 84.5406, 274)
femmesh.addNode(142.5, 26.1584, 137.428, 275)
femmesh.addNode(142.5, -84.3969, 181.746, 276)
femmesh.addNode(142.5, -84.2758, 232.086, 277)
femmesh.addNode(142.5, -84.2827, 282.032, 278)
femmesh.addNode(142.5, -84.1337, 332.651, 279)
femmesh.addNode(142.5, -29.2616, 210.927, 280)
femmesh.addNode(142.5, -29.3967, 260.858, 281)
femmesh.addNode(142.5, 26.0842, 188.506, 282)
femmesh.addNode(142.5, 25.9315, 238.681, 283)
femmesh.addNode(142.5, -29.1757, 311.391, 284)
femmesh.addNode(142.5, -28.7076, 361.897, 285)
femmesh.addNode(142.5, 25.946, 288.852, 286)
femmesh.addNode(142.5, 26.2889, 339.654, 287)
femmesh.addNode(142.5, 79.9661, 55.4097, 288)
femmesh.addNode(142.5, 83.1462, 115.644, 289)
femmesh.addNode(142.5, 83.7177, 168.069, 290)
femmesh.addNode(142.5, 83.7677, 218.68, 291)
femmesh.addNode(142.5, 83.7551, 268.845, 292)
femmesh.addNode(142.5, 83.8444, 319.301, 293)
femmesh.addNode(142.5, 83.832, 369.493, 294)
femmesh.addNode(142.5, -84.4685, 381.956, 295)
femmesh.addNode(142.5, -84.2929, 432.126, 296)
femmesh.addNode(142.5, -84.2867, 482.039, 297)
femmesh.addNode(142.5, -84.1346, 532.653, 298)
femmesh.addNode(142.5, -29.0852, 411.479, 299)
femmesh.addNode(142.5, -29.3382, 461.022, 300)
femmesh.addNode(142.5, 26.5143, 389.778, 301)
femmesh.addNode(142.5, 26.0976, 439.168, 302)
femmesh.addNode(142.5, -29.1615, 511.431, 303)
femmesh.addNode(142.5, -28.887, 561.607, 304)
femmesh.addNode(142.5, 26.0013, 489.005, 305)
femmesh.addNode(142.5, 26.0668, 539.096, 306)
femmesh.addNode(142.5, -84.4986, 581.908, 307)
femmesh.addNode(142.5, -84.2979, 632.118, 308)
femmesh.addNode(142.5, -84.2875, 682.038, 309)
femmesh.addNode(142.5, -84.1347, 732.652, 310)
femmesh.addNode(142.5, -29.3337, 611.072, 311)
femmesh.addNode(142.5, -29.4161, 660.894, 312)
femmesh.addNode(142.5, 26.0193, 588.998, 313)
femmesh.addNode(142.5, 25.9036, 638.814, 314)
femmesh.addNode(142.5, -29.1806, 711.4, 315)
femmesh.addNode(142.5, -28.7088, 761.899, 316)
femmesh.addNode(142.5, 25.9364, 688.886, 317)
femmesh.addNode(142.5, 26.286, 739.662, 318)
femmesh.addNode(142.5, 83.898, 419.24, 319)
femmesh.addNode(142.5, 83.8166, 469.052, 320)
femmesh.addNode(142.5, 83.8027, 519.019, 321)
femmesh.addNode(142.5, 83.8003, 569.008, 322)
femmesh.addNode(142.5, 83.7654, 618.944, 323)
femmesh.addNode(142.5, 83.7483, 668.918, 324)
femmesh.addNode(142.5, 83.8412, 719.32, 325)
femmesh.addNode(142.5, 83.8309, 769.498, 326)
femmesh.addNode(142.5, -84.4689, 781.957, 327)
femmesh.addNode(142.5, -84.293, 832.127, 328)
femmesh.addNode(142.5, -83.9065, 883.976, 329)
femmesh.addNode(142.5, -80.6013, 944.348, 330)
femmesh.addNode(142.5, -29.0855, 811.479, 331)
femmesh.addNode(142.5, -29.2116, 861.667, 332)
femmesh.addNode(142.5, 26.5134, 789.781, 333)
femmesh.addNode(142.5, 26.129, 839.33, 334)
femmesh.addNode(142.5, -28.4518, 914.46, 335)
femmesh.addNode(142.5, -26.4487, 961.198, 336)
femmesh.addNode(142.5, 26.0999, 891.348, 337)
femmesh.addNode(142.5, 25.5438, 949.788, 338)
femmesh.addNode(142.5, 83.9044, 819.276, 339)
femmesh.addNode(142.5, 83.834, 869.725, 340)
femmesh.addNode(142.5, 83.119, 920.663, 341)
femmesh.addNode(142.5, 78.7325, 964.09, 342)
femmesh.addNode(95.0, 142.5, 0.0, 343)
femmesh.addNode(47.5, 142.5, 0.0, 344)
femmesh.addNode(0.0, 142.5, 0.0, 345)
femmesh.addNode(-47.5, 142.5, 0.0, 346)
femmesh.addNode(-95.0, 142.5, 0.0, 347)
femmesh.addNode(95.0, 142.5, 1000.0, 348)
femmesh.addNode(47.5, 142.5, 1000.0, 349)
femmesh.addNode(0.0, 142.5, 1000.0, 350)
femmesh.addNode(-47.5, 142.5, 1000.0, 351)
femmesh.addNode(-95.0, 142.5, 1000.0, 352)
femmesh.addNode(78.2801, 142.5, 37.6071, 353)
femmesh.addNode(82.8778, 142.5, 81.1309, 354)
femmesh.addNode(83.7987, 142.5, 132.243, 355)
femmesh.addNode(26.5975, 142.5, 51.4008, 356)
femmesh.addNode(-24.9195, 142.5, 38.6958, 357)
femmesh.addNode(28.3071, 142.5, 109.277, 358)
femmesh.addNode(28.6437, 142.5, 161.076, 359)
femmesh.addNode(-25.856, 142.5, 84.5406, 360)
femmesh.addNode(-26.1584, 142.5, 137.428, 361)
femmesh.addNode(84.3969, 142.5, 181.746, 362)
femmesh.addNode(84.2758, 142.5, 232.086, 363)
femmesh.addNode(84.2827, 142.5, 282.032, 364)
femmesh.addNode(84.1337, 142.5, 332.651, 365)
femmesh.addNode(29.2616, 142.5, 210.927, 366)
femmesh.addNode(29.3967, 142.5, 260.858, 367)
femmesh.addNode(-26.0842, 142.5, 188.506, 368)
femmesh.addNode(-25.9315, 142.5, 238.681, 369)
femmesh.addNode(29.1757, 142.5, 311.391, 370)
femmesh.addNode(28.7076, 142.5, 361.897, 371)
femmesh.addNode(-25.946, 142.5, 288.852, 372)
femmesh.addNode(-26.2889, 142.5, 339.654, 373)
femmesh.addNode(-79.9661, 142.5, 55.4097, 374)
femmesh.addNode(-83.1462, 142.5, 115.644, 375)
femmesh.addNode(-83.7177, 142.5, 168.069, 376)
femmesh.addNode(-83.7677, 142.5, 218.68, 377)
femmesh.addNode(-83.7551, 142.5, 268.845, 378)
femmesh.addNode(-83.8444, 142.5, 319.301, 379)
femmesh.addNode(-83.832, 142.5, 369.493, 380)
femmesh.addNode(84.4685, 142.5, 381.956, 381)
femmesh.addNode(84.2929, 142.5, 432.126, 382)
femmesh.addNode(84.2867, 142.5, 482.039, 383)
femmesh.addNode(84.1346, 142.5, 532.653, 384)
femmesh.addNode(29.0852, 142.5, 411.479, 385)
femmesh.addNode(29.3382, 142.5, 461.022, 386)
femmesh.addNode(-26.5143, 142.5, 389.778, 387)
femmesh.addNode(-26.0976, 142.5, 439.168, 388)
femmesh.addNode(29.1615, 142.5, 511.431, 389)
femmesh.addNode(28.887, 142.5, 561.607, 390)
femmesh.addNode(-26.0013, 142.5, 489.005, 391)
femmesh.addNode(-26.0668, 142.5, 539.096, 392)
femmesh.addNode(84.4986, 142.5, 581.908, 393)
femmesh.addNode(84.2979, 142.5, 632.118, 394)
femmesh.addNode(84.2875, 142.5, 682.038, 395)
femmesh.addNode(84.1347, 142.5, 732.652, 396)
femmesh.addNode(29.3337, 142.5, 611.072, 397)
femmesh.addNode(29.4161, 142.5, 660.894, 398)
femmesh.addNode(-26.0193, 142.5, 588.998, 399)
femmesh.addNode(-25.9036, 142.5, 638.814, 400)
femmesh.addNode(29.1806, 142.5, 711.4, 401)
femmesh.addNode(28.7088, 142.5, 761.899, 402)
femmesh.addNode(-25.9364, 142.5, 688.886, 403)
femmesh.addNode(-26.286, 142.5, 739.662, 404)
femmesh.addNode(-83.898, 142.5, 419.24, 405)
femmesh.addNode(-83.8166, 142.5, 469.052, 406)
femmesh.addNode(-83.8027, 142.5, 519.019, 407)
femmesh.addNode(-83.8003, 142.5, 569.008, 408)
femmesh.addNode(-83.7654, 142.5, 618.944, 409)
femmesh.addNode(-83.7483, 142.5, 668.918, 410)
femmesh.addNode(-83.8412, 142.5, 719.32, 411)
femmesh.addNode(-83.8309, 142.5, 769.498, 412)
femmesh.addNode(84.4689, 142.5, 781.957, 413)
femmesh.addNode(84.293, 142.5, 832.127, 414)
femmesh.addNode(83.9065, 142.5, 883.976, 415)
femmesh.addNode(80.6013, 142.5, 944.348, 416)
femmesh.addNode(29.0855, 142.5, 811.479, 417)
femmesh.addNode(29.2116, 142.5, 861.667, 418)
femmesh.addNode(-26.5134, 142.5, 789.781, 419)
femmesh.addNode(-26.129, 142.5, 839.33, 420)
femmesh.addNode(28.4518, 142.5, 914.46, 421)
femmesh.addNode(26.4487, 142.5, 961.198, 422)
femmesh.addNode(-26.0999, 142.5, 891.348, 423)
femmesh.addNode(-25.5438, 142.5, 949.788, 424)
femmesh.addNode(-83.9044, 142.5, 819.276, 425)
femmesh.addNode(-83.834, 142.5, 869.725, 426)
femmesh.addNode(-83.119, 142.5, 920.663, 427)
femmesh.addNode(-78.7325, 142.5, 964.09, 428)
femmesh.addNode(-142.5, 118.75, 0.0, 429)
femmesh.addNode(-142.5, 142.5, 25.0, 430)
femmesh.addNode(-142.5, -118.75, 0.0, 431)
femmesh.addNode(-142.5, -142.5, 25.0, 432)
femmesh.addNode(-142.5, 142.5, 975.0, 433)
femmesh.addNode(-142.5, 118.75, 1000.0, 434)
femmesh.addNode(-142.5, -118.75, 1000.0, 435)
femmesh.addNode(-142.5, -142.5, 975.0, 436)
femmesh.addNode(-142.5, 71.25, 0.0, 437)
femmesh.addNode(-142.5, 118.75, 25.0, 438)
femmesh.addNode(-142.5, 86.6401, 18.8035, 439)
femmesh.addNode(-142.5, 23.75, 0.0, 440)
femmesh.addNode(-142.5, 62.8901, 18.8035, 441)
femmesh.addNode(-142.5, 37.0488, 25.7004, 442)
femmesh.addNode(-142.5, -23.75, 0.0, 443)
femmesh.addNode(-142.5, 13.2988, 25.7004, 444)
femmesh.addNode(-142.5, -12.4598, 19.3479, 445)
femmesh.addNode(-142.5, -71.25, 0.0, 446)
femmesh.addNode(-142.5, -36.2098, 19.3479, 447)
femmesh.addNode(-142.5, -63.7331, 27.7049, 448)
femmesh.addNode(-142.5, -118.75, 25.0, 449)
femmesh.addNode(-142.5, -87.4831, 27.7049, 450)
femmesh.addNode(-142.5, 142.5, 75.0, 451)
femmesh.addNode(-142.5, 110.39, 43.8035, 452)
femmesh.addNode(-142.5, 112.689, 65.5654, 453)
femmesh.addNode(-142.5, 142.5, 125.0, 454)
femmesh.addNode(-142.5, 112.689, 90.5654, 455)
femmesh.addNode(-142.5, 113.149, 116.121, 456)
femmesh.addNode(-142.5, 142.5, 175.0, 457)
femmesh.addNode(-142.5, 113.149, 141.121, 458)
femmesh.addNode(-142.5, 113.448, 165.873, 459)
femmesh.addNode(-142.5, 142.5, 225.0, 460)
femmesh.addNode(-142.5, 113.448, 190.873, 461)
femmesh.addNode(-142.5, 113.388, 216.043, 462)
femmesh.addNode(-142.5, 142.5, 275.0, 463)
femmesh.addNode(-142.5, 113.388, 241.043, 464)
femmesh.addNode(-142.5, 113.391, 266.016, 465)
femmesh.addNode(-142.5, 142.5, 325.0, 466)
femmesh.addNode(-142.5, 113.391, 291.016, 467)
femmesh.addNode(-142.5, 113.317, 316.326, 468)
femmesh.addNode(-142.5, 142.5, 375.0, 469)
femmesh.addNode(-142.5, 113.317, 341.326, 470)
femmesh.addNode(-142.5, 113.484, 365.978, 471)
femmesh.addNode(-142.5, 142.5, 425.0, 472)
femmesh.addNode(-142.5, 113.484, 390.978, 473)
femmesh.addNode(-142.5, 113.396, 416.063, 474)
femmesh.addNode(-142.5, 142.5, 475.0, 475)
femmesh.addNode(-142.5, 113.396, 441.063, 476)
femmesh.addNode(-142.5, 113.393, 466.02, 477)
femmesh.addNode(-142.5, 142.5, 525.0, 478)
femmesh.addNode(-142.5, 113.393, 491.02, 479)
femmesh.addNode(-142.5, 113.317, 516.326, 480)
femmesh.addNode(-142.5, 142.5, 575.0, 481)
femmesh.addNode(-142.5, 113.317, 541.326, 482)
femmesh.addNode(-142.5, 113.499, 565.954, 483)
femmesh.addNode(-142.5, 142.5, 625.0, 484)
femmesh.addNode(-142.5, 113.499, 590.954, 485)
femmesh.addNode(-142.5, 113.399, 616.059, 486)
femmesh.addNode(-142.5, 142.5, 675.0, 487)
femmesh.addNode(-142.5, 113.399, 641.059, 488)
femmesh.addNode(-142.5, 113.394, 666.019, 489)
femmesh.addNode(-142.5, 142.5, 725.0, 490)
femmesh.addNode(-142.5, 113.394, 691.019, 491)
femmesh.addNode(-142.5, 113.317, 716.326, 492)
femmesh.addNode(-142.5, 142.5, 775.0, 493)
femmesh.addNode(-142.5, 113.317, 741.326, 494)
femmesh.addNode(-142.5, 113.484, 765.978, 495)
femmesh.addNode(-142.5, 142.5, 825.0, 496)
femmesh.addNode(-142.5, 113.484, 790.978, 497)
femmesh.addNode(-142.5, 113.396, 816.063, 498)
femmesh.addNode(-142.5, 142.5, 875.0, 499)
femmesh.addNode(-142.5, 113.396, 841.063, 500)
femmesh.addNode(-142.5, 113.203, 866.988, 501)
femmesh.addNode(-142.5, 142.5, 925.0, 502)
femmesh.addNode(-142.5, 113.203, 891.988, 503)
femmesh.addNode(-142.5, 111.551, 922.174, 504)
femmesh.addNode(-142.5, 118.75, 975.0, 505)
femmesh.addNode(-142.5, 111.551, 947.174, 506)
femmesh.addNode(-142.5, 71.25, 1000.0, 507)
femmesh.addNode(-142.5, 87.8006, 972.174, 508)
femmesh.addNode(-142.5, 23.75, 1000.0, 509)
femmesh.addNode(-142.5, 64.0506, 972.174, 510)
femmesh.addNode(-142.5, 36.9743, 980.599, 511)
femmesh.addNode(-142.5, -23.75, 1000.0, 512)
femmesh.addNode(-142.5, 13.2243, 980.599, 513)
femmesh.addNode(-142.5, -12.7719, 974.894, 514)
femmesh.addNode(-142.5, -71.25, 1000.0, 515)
femmesh.addNode(-142.5, -36.5219, 974.894, 516)
femmesh.addNode(-142.5, -63.1163, 982.045, 517)
femmesh.addNode(-142.5, -118.75, 975.0, 518)
femmesh.addNode(-142.5, -86.8663, 982.045, 519)
femmesh.addNode(-142.5, -142.5, 75.0, 520)
femmesh.addNode(-142.5, -111.233, 52.7049, 521)
femmesh.addNode(-142.5, -142.5, 125.0, 522)
femmesh.addNode(-142.5, -111.233, 77.7049, 523)
femmesh.addNode(-142.5, -112.823, 107.822, 524)
femmesh.addNode(-142.5, -142.5, 175.0, 525)
femmesh.addNode(-142.5, -112.823, 132.822, 526)
femmesh.addNode(-142.5, -113.109, 159.034, 527)
femmesh.addNode(-142.5, -142.5, 225.0, 528)
femmesh.addNode(-142.5, -113.109, 184.034, 529)
femmesh.addNode(-142.5, -113.134, 209.34, 530)
femmesh.addNode(-142.5, -142.5, 275.0, 531)
femmesh.addNode(-142.5, -113.134, 234.34, 532)
femmesh.addNode(-142.5, -113.128, 259.422, 533)
femmesh.addNode(-142.5, -142.5, 325.0, 534)
femmesh.addNode(-142.5, -113.128, 284.422, 535)
femmesh.addNode(-142.5, -113.172, 309.65, 536)
femmesh.addNode(-142.5, -142.5, 375.0, 537)
femmesh.addNode(-142.5, -113.172, 334.65, 538)
femmesh.addNode(-142.5, -113.166, 359.747, 539)
femmesh.addNode(-142.5, -142.5, 425.0, 540)
femmesh.addNode(-142.5, -113.166, 384.747, 541)
femmesh.addNode(-142.5, -113.199, 409.62, 542)
femmesh.addNode(-142.5, -142.5, 475.0, 543)
femmesh.addNode(-142.5, -113.199, 434.62, 544)
femmesh.addNode(-142.5, -113.158, 459.526, 545)
femmesh.addNode(-142.5, -142.5, 525.0, 546)
femmesh.addNode(-142.5, -113.158, 484.526, 547)
femmesh.addNode(-142.5, -113.151, 509.51, 548)
femmesh.addNode(-142.5, -142.5, 575.0, 549)
femmesh.addNode(-142.5, -113.151, 534.51, 550)
femmesh.addNode(-142.5, -113.15, 559.504, 551)
femmesh.addNode(-142.5, -142.5, 625.0, 552)
femmesh.addNode(-142.5, -113.15, 584.504, 553)
femmesh.addNode(-142.5, -113.133, 609.472, 554)
femmesh.addNode(-142.5, -142.5, 675.0, 555)
femmesh.addNode(-142.5, -113.133, 634.472, 556)
femmesh.addNode(-142.5, -113.124, 659.459, 557)
femmesh.addNode(-142.5, -142.5, 725.0, 558)
femmesh.addNode(-142.5, -113.124, 684.459, 559)
femmesh.addNode(-142.5, -113.171, 709.66, 560)
femmesh.addNode(-142.5, -142.5, 775.0, 561)
femmesh.addNode(-142.5, -113.171, 734.66, 562)
femmesh.addNode(-142.5, -113.165, 759.749, 563)
femmesh.addNode(-142.5, -142.5, 825.0, 564)
femmesh.addNode(-142.5, -113.165, 784.749, 565)
femmesh.addNode(-142.5, -113.202, 809.638, 566)
femmesh.addNode(-142.5, -142.5, 875.0, 567)
femmesh.addNode(-142.5, -113.202, 834.638, 568)
femmesh.addNode(-142.5, -113.167, 859.862, 569)
femmesh.addNode(-142.5, -142.5, 925.0, 570)
femmesh.addNode(-142.5, -113.167, 884.862, 571)
femmesh.addNode(-142.5, -112.809, 910.331, 572)
femmesh.addNode(-142.5, -112.809, 935.331, 573)
femmesh.addNode(-142.5, -110.616, 957.045, 574)
femmesh.addNode(-142.5, 80.579, 59.369, 575)
femmesh.addNode(-142.5, 52.4388, 44.5039, 576)
femmesh.addNode(-142.5, 83.3383, 106.687, 577)
femmesh.addNode(-142.5, 54.7377, 66.2659, 578)
femmesh.addNode(-142.5, 55.5925, 95.204, 579)
femmesh.addNode(-142.5, 56.0529, 120.76, 580)
femmesh.addNode(-142.5, 56.2212, 146.66, 581)
femmesh.addNode(-142.5, 84.0978, 156.995, 582)
femmesh.addNode(-142.5, 0.839004, 45.0483, 583)
femmesh.addNode(-142.5, 27.4523, 80.3389, 584)
femmesh.addNode(-142.5, 0.370743, 67.9707, 585)
femmesh.addNode(-142.5, -25.3878, 61.6182, 586)
femmesh.addNode(-142.5, -52.4428, 47.0528, 587)
femmesh.addNode(-142.5, 28.4754, 135.177, 588)
femmesh.addNode(-142.5, 1.22553, 96.9088, 589)
femmesh.addNode(-142.5, 1.07434, 123.353, 590)
femmesh.addNode(-142.5, 1.24264, 149.252, 591)
femmesh.addNode(-142.5, 56.5203, 171.411, 592)
femmesh.addNode(-142.5, 28.9526, 186.002, 593)
femmesh.addNode(-142.5, 1.27977, 174.791, 594)
femmesh.addNode(-142.5, -26.0072, 110.985, 595)
femmesh.addNode(-142.5, -52.9111, 69.9752, 596)
femmesh.addNode(-142.5, -54.5011, 100.092, 597)
femmesh.addNode(-142.5, -26.1213, 162.967, 598)
femmesh.addNode(-142.5, -54.6523, 126.536, 599)
femmesh.addNode(-142.5, -54.938, 152.749, 600)
femmesh.addNode(-142.5, 84.3363, 206.916, 601)
femmesh.addNode(-142.5, 56.8292, 196.337, 602)
femmesh.addNode(-142.5, 84.2793, 257.059, 603)
femmesh.addNode(-142.5, 56.7687, 221.506, 604)
femmesh.addNode(-142.5, 56.8363, 246.472, 605)
femmesh.addNode(-142.5, 84.2082, 307.341, 606)
femmesh.addNode(-142.5, 56.8397, 271.445, 607)
femmesh.addNode(-142.5, 56.7292, 296.711, 608)
femmesh.addNode(-142.5, 56.6547, 322.021, 609)
femmesh.addNode(-142.5, 56.4206, 347.274, 610)
femmesh.addNode(-142.5, 84.3011, 357.304, 611)
femmesh.addNode(-142.5, 29.3292, 235.893, 612)
femmesh.addNode(-142.5, 1.58871, 199.717, 613)
femmesh.addNode(-142.5, 1.66506, 224.804, 614)
femmesh.addNode(-142.5, 1.73262, 249.769, 615)
femmesh.addNode(-142.5, 29.2862, 286.125, 616)
femmesh.addNode(-142.5, 1.72536, 274.855, 617)
femmesh.addNode(-142.5, -26.0078, 213.593, 618)
femmesh.addNode(-142.5, -54.9009, 178.287, 619)
femmesh.addNode(-142.5, -54.9259, 203.593, 620)
femmesh.addNode(-142.5, -25.9387, 263.766, 621)
femmesh.addNode(-142.5, -54.8496, 228.68, 622)
femmesh.addNode(-142.5, -54.8433, 253.763, 623)
femmesh.addNode(-142.5, 28.9416, 336.644, 624)
femmesh.addNode(-142.5, 1.61485, 300.122, 625)
femmesh.addNode(-142.5, 1.44341, 325.522, 626)
femmesh.addNode(-142.5, 1.20936, 350.775, 627)
femmesh.addNode(-142.5, 56.588, 371.927, 628)
femmesh.addNode(-142.5, 28.8964, 386.688, 629)
femmesh.addNode(-142.5, 1.09664, 375.838, 630)
femmesh.addNode(-142.5, -26.1174, 314.253, 631)
femmesh.addNode(-142.5, -54.8505, 278.848, 632)
femmesh.addNode(-142.5, -54.8952, 304.076, 633)
femmesh.addNode(-142.5, -55.0667, 329.477, 634)
femmesh.addNode(-142.5, -55.0604, 354.573, 635)
femmesh.addNode(-142.5, -26.4016, 364.716, 636)
femmesh.addNode(-142.5, -81.5561, 85.5268, 637)
femmesh.addNode(-142.5, -83.4319, 141.856, 638)
femmesh.addNode(-142.5, -83.7427, 193.374, 639)
femmesh.addNode(-142.5, -83.7614, 243.762, 640)
femmesh.addNode(-142.5, -83.7998, 294.073, 641)
femmesh.addNode(-142.5, -83.8382, 344.397, 642)
femmesh.addNode(-142.5, -55.1731, 379.636, 643)
femmesh.addNode(-142.5, -83.865, 394.367, 644)
femmesh.addNode(-142.5, 84.3807, 407.041, 645)
femmesh.addNode(-142.5, 56.7768, 396.717, 646)
femmesh.addNode(-142.5, 84.2898, 457.083, 647)
femmesh.addNode(-142.5, 56.6891, 421.803, 648)
femmesh.addNode(-142.5, 56.8156, 446.574, 649)
femmesh.addNode(-142.5, 84.2106, 507.346, 650)
femmesh.addNode(-142.5, 56.8125, 471.53, 651)
femmesh.addNode(-142.5, 56.7241, 496.735, 652)
femmesh.addNode(-142.5, 56.6481, 522.042, 653)
femmesh.addNode(-142.5, 56.5108, 547.13, 654)
femmesh.addNode(-142.5, 84.3166, 557.28, 655)
femmesh.addNode(-142.5, 29.2117, 436.25, 656)
femmesh.addNode(-142.5, 1.28543, 400.628, 657)
femmesh.addNode(-142.5, 1.4938, 425.323, 658)
femmesh.addNode(-142.5, 1.62033, 450.095, 659)
femmesh.addNode(-142.5, 29.2499, 486.226, 660)
femmesh.addNode(-142.5, 1.66847, 475.013, 661)
femmesh.addNode(-142.5, -26.3059, 414.473, 662)
femmesh.addNode(-142.5, -55.2062, 404.509, 663)
femmesh.addNode(-142.5, -26.0494, 464.087, 664)
femmesh.addNode(-142.5, -54.9978, 429.204, 665)
femmesh.addNode(-142.5, -54.9571, 454.11, 666)
femmesh.addNode(-142.5, 29.0243, 536.519, 667)
femmesh.addNode(-142.5, 1.58012, 500.218, 668)
femmesh.addNode(-142.5, 1.54736, 525.264, 669)
femmesh.addNode(-142.5, 1.4101, 550.352, 670)
femmesh.addNode(-142.5, 56.6928, 571.757, 671)
femmesh.addNode(-142.5, 29.1104, 586.339, 672)
femmesh.addNode(-142.5, 1.43386, 575.302, 673)
femmesh.addNode(-142.5, -26.0341, 514.051, 674)
femmesh.addNode(-142.5, -54.9089, 479.029, 675)
femmesh.addNode(-142.5, -54.902, 504.012, 676)
femmesh.addNode(-142.5, -26.0431, 564.047, 677)
femmesh.addNode(-142.5, -54.9347, 529.058, 678)
femmesh.addNode(-142.5, -54.9336, 554.052, 679)
femmesh.addNode(-142.5, 84.3983, 607.013, 680)
femmesh.addNode(-142.5, 56.9161, 596.49, 681)
femmesh.addNode(-142.5, 84.2927, 657.078, 682)
femmesh.addNode(-142.5, 56.8158, 621.595, 683)
femmesh.addNode(-142.5, 56.857, 646.506, 684)
femmesh.addNode(-142.5, 84.2111, 707.345, 685)
femmesh.addNode(-142.5, 56.8518, 671.466, 686)
femmesh.addNode(-142.5, 56.734, 696.719, 687)
femmesh.addNode(-142.5, 56.6577, 722.026, 688)
femmesh.addNode(-142.5, 56.4217, 747.276, 689)
femmesh.addNode(-142.5, 84.3018, 757.304, 690)
femmesh.addNode(-142.5, 29.3749, 635.983, 691)
femmesh.addNode(-142.5, 1.65719, 600.035, 692)
femmesh.addNode(-142.5, 1.71503, 624.943, 693)
femmesh.addNode(-142.5, 1.75623, 649.854, 694)
femmesh.addNode(-142.5, 29.2983, 686.147, 695)
femmesh.addNode(-142.5, 1.73983, 674.89, 696)
femmesh.addNode(-142.5, -25.9615, 613.906, 697)
femmesh.addNode(-142.5, -54.9098, 579.003, 698)
femmesh.addNode(-142.5, -54.8924, 603.971, 699)
femmesh.addNode(-142.5, -25.92, 663.85, 700)
femmesh.addNode(-142.5, -54.8345, 628.879, 701)
femmesh.addNode(-142.5, -54.8259, 653.866, 702)
femmesh.addNode(-142.5, 28.9447, 736.649, 703)
femmesh.addNode(-142.5, 1.62209, 700.143, 704)
femmesh.addNode(-142.5, 1.44731, 725.531, 705)
femmesh.addNode(-142.5, 1.2114, 750.781, 706)
femmesh.addNode(-142.5, 56.5888, 771.928, 707)
femmesh.addNode(-142.5, 28.8971, 786.689, 708)
femmesh.addNode(-142.5, 1.09769, 775.84, 709)
femmesh.addNode(-142.5, -26.1112, 714.274, 710)
femmesh.addNode(-142.5, -54.8423, 678.902, 711)
femmesh.addNode(-142.5, -54.8888, 704.103, 712)
femmesh.addNode(-142.5, -55.0636, 729.491, 713)
femmesh.addNode(-142.5, -55.0584, 754.58, 714)
femmesh.addNode(-142.5, -26.3997, 764.721, 715)
femmesh.addNode(-142.5, -83.8573, 444.146, 716)
femmesh.addNode(-142.5, -83.8096, 494.035, 717)
femmesh.addNode(-142.5, -83.8015, 544.014, 718)
femmesh.addNode(-142.5, -83.7829, 593.976, 719)
femmesh.addNode(-142.5, -83.7569, 643.931, 720)
femmesh.addNode(-142.5, -83.7947, 694.119, 721)
femmesh.addNode(-142.5, -83.836, 744.409, 722)
femmesh.addNode(-142.5, -55.1721, 779.639, 723)
femmesh.addNode(-142.5, -83.8677, 794.387, 724)
femmesh.addNode(-142.5, 84.3809, 807.042, 725)
femmesh.addNode(-142.5, 56.7772, 796.718, 726)
femmesh.addNode(-142.5, 84.0997, 858.051, 727)
femmesh.addNode(-142.5, 56.6892, 821.803, 728)
femmesh.addNode(-142.5, 56.7523, 846.897, 729)
femmesh.addNode(-142.5, 82.2539, 914.162, 730)
femmesh.addNode(-142.5, 56.559, 872.821, 731)
femmesh.addNode(-142.5, 56.1791, 899.218, 732)
femmesh.addNode(-142.5, 54.5266, 929.404, 733)
femmesh.addNode(-142.5, 53.525, 952.773, 734)
femmesh.addNode(-142.5, 29.1485, 836.573, 735)
femmesh.addNode(-142.5, 1.28606, 800.63, 736)
femmesh.addNode(-142.5, 1.47824, 825.404, 737)
femmesh.addNode(-142.5, 1.54127, 850.498, 738)
femmesh.addNode(-142.5, 28.8317, 888.064, 739)
femmesh.addNode(-142.5, 1.55582, 876.507, 740)
femmesh.addNode(-142.5, -26.3212, 814.555, 741)
femmesh.addNode(-142.5, -55.2089, 804.528, 742)
femmesh.addNode(-142.5, -26.1145, 865.339, 743)
femmesh.addNode(-142.5, -55.0167, 829.303, 744)
femmesh.addNode(-142.5, -54.9815, 854.527, 745)
femmesh.addNode(-142.5, 27.4503, 937.829, 746)
femmesh.addNode(-142.5, 1.17596, 902.904, 747)
femmesh.addNode(-142.5, 1.45404, 932.124, 748)
femmesh.addNode(-142.5, 0.452457, 955.493, 749)
femmesh.addNode(-142.5, -25.8218, 920.568, 750)
femmesh.addNode(-142.5, -54.967, 880.536, 751)
femmesh.addNode(-142.5, -54.6094, 906.005, 752)
femmesh.addNode(-142.5, -54.3314, 935.226, 753)
femmesh.addNode(-142.5, -52.1382, 956.939, 754)
femmesh.addNode(-142.5, -83.8692, 844.501, 755)
femmesh.addNode(-142.5, -83.4765, 895.194, 756)
femmesh.addNode(-142.5, -80.9258, 942.377, 757)
femmesh.addNode(118.75, -142.5, 0.0, 758)
femmesh.addNode(142.5, -142.5, 25.0, 759)
femmesh.addNode(118.75, -142.5, 1000.0, 760)
femmesh.addNode(142.5, -142.5, 975.0, 761)
femmesh.addNode(-71.25, -142.5, 0.0, 762)
femmesh.addNode(-86.6401, -142.5, 18.8035, 763)
femmesh.addNode(-118.75, -142.5, 0.0, 764)
femmesh.addNode(-118.75, -142.5, 25.0, 765)
femmesh.addNode(-23.75, -142.5, 0.0, 766)
femmesh.addNode(-62.8901, -142.5, 18.8035, 767)
femmesh.addNode(-37.0488, -142.5, 25.7004, 768)
femmesh.addNode(23.75, -142.5, 0.0, 769)
femmesh.addNode(-13.2988, -142.5, 25.7004, 770)
femmesh.addNode(12.4598, -142.5, 19.3479, 771)
femmesh.addNode(71.25, -142.5, 0.0, 772)
femmesh.addNode(36.2098, -142.5, 19.3479, 773)
femmesh.addNode(63.7331, -142.5, 27.7049, 774)
femmesh.addNode(118.75, -142.5, 25.0, 775)
femmesh.addNode(87.4831, -142.5, 27.7049, 776)
femmesh.addNode(-71.25, -142.5, 1000.0, 777)
femmesh.addNode(-87.8006, -142.5, 972.174, 778)
femmesh.addNode(-118.75, -142.5, 975.0, 779)
femmesh.addNode(-118.75, -142.5, 1000.0, 780)
femmesh.addNode(-23.75, -142.5, 1000.0, 781)
femmesh.addNode(-64.0506, -142.5, 972.174, 782)
femmesh.addNode(-36.9743, -142.5, 980.599, 783)
femmesh.addNode(23.75, -142.5, 1000.0, 784)
femmesh.addNode(-13.2243, -142.5, 980.599, 785)
femmesh.addNode(12.7719, -142.5, 974.894, 786)
femmesh.addNode(71.25, -142.5, 1000.0, 787)
femmesh.addNode(36.5219, -142.5, 974.894, 788)
femmesh.addNode(63.1163, -142.5, 982.045, 789)
femmesh.addNode(118.75, -142.5, 975.0, 790)
femmesh.addNode(86.8663, -142.5, 982.045, 791)
femmesh.addNode(142.5, -142.5, 75.0, 792)
femmesh.addNode(111.233, -142.5, 52.7049, 793)
femmesh.addNode(142.5, -142.5, 125.0, 794)
femmesh.addNode(111.233, -142.5, 77.7049, 795)
femmesh.addNode(112.823, -142.5, 107.822, 796)
femmesh.addNode(142.5, -142.5, 175.0, 797)
femmesh.addNode(112.823, -142.5, 132.822, 798)
femmesh.addNode(113.109, -142.5, 159.034, 799)
femmesh.addNode(142.5, -142.5, 225.0, 800)
femmesh.addNode(113.109, -142.5, 184.034, 801)
femmesh.addNode(113.134, -142.5, 209.34, 802)
femmesh.addNode(142.5, -142.5, 275.0, 803)
femmesh.addNode(113.134, -142.5, 234.34, 804)
femmesh.addNode(113.128, -142.5, 259.422, 805)
femmesh.addNode(142.5, -142.5, 325.0, 806)
femmesh.addNode(113.128, -142.5, 284.422, 807)
femmesh.addNode(113.172, -142.5, 309.65, 808)
femmesh.addNode(142.5, -142.5, 375.0, 809)
femmesh.addNode(113.172, -142.5, 334.65, 810)
femmesh.addNode(113.166, -142.5, 359.747, 811)
femmesh.addNode(142.5, -142.5, 425.0, 812)
femmesh.addNode(113.166, -142.5, 384.747, 813)
femmesh.addNode(113.199, -142.5, 409.62, 814)
femmesh.addNode(142.5, -142.5, 475.0, 815)
femmesh.addNode(113.199, -142.5, 434.62, 816)
femmesh.addNode(113.158, -142.5, 459.526, 817)
femmesh.addNode(142.5, -142.5, 525.0, 818)
femmesh.addNode(113.158, -142.5, 484.526, 819)
femmesh.addNode(113.151, -142.5, 509.51, 820)
femmesh.addNode(142.5, -142.5, 575.0, 821)
femmesh.addNode(113.151, -142.5, 534.51, 822)
femmesh.addNode(113.15, -142.5, 559.504, 823)
femmesh.addNode(142.5, -142.5, 625.0, 824)
femmesh.addNode(113.15, -142.5, 584.504, 825)
femmesh.addNode(113.133, -142.5, 609.472, 826)
femmesh.addNode(142.5, -142.5, 675.0, 827)
femmesh.addNode(113.133, -142.5, 634.472, 828)
femmesh.addNode(113.124, -142.5, 659.459, 829)
femmesh.addNode(142.5, -142.5, 725.0, 830)
femmesh.addNode(113.124, -142.5, 684.459, 831)
femmesh.addNode(113.171, -142.5, 709.66, 832)
femmesh.addNode(142.5, -142.5, 775.0, 833)
femmesh.addNode(113.171, -142.5, 734.66, 834)
femmesh.addNode(113.165, -142.5, 759.749, 835)
femmesh.addNode(142.5, -142.5, 825.0, 836)
femmesh.addNode(113.165, -142.5, 784.749, 837)
femmesh.addNode(113.202, -142.5, 809.638, 838)
femmesh.addNode(142.5, -142.5, 875.0, 839)
femmesh.addNode(113.202, -142.5, 834.638, 840)
femmesh.addNode(113.167, -142.5, 859.862, 841)
femmesh.addNode(142.5, -142.5, 925.0, 842)
femmesh.addNode(113.167, -142.5, 884.862, 843)
femmesh.addNode(112.809, -142.5, 910.331, 844)
femmesh.addNode(112.809, -142.5, 935.331, 845)
femmesh.addNode(110.616, -142.5, 957.045, 846)
femmesh.addNode(-80.579, -142.5, 59.369, 847)
femmesh.addNode(-52.4388, -142.5, 44.5039, 848)
femmesh.addNode(-110.39, -142.5, 43.8035, 849)
femmesh.addNode(-83.3383, -142.5, 106.687, 850)
femmesh.addNode(-54.7377, -142.5, 66.2659, 851)
femmesh.addNode(-55.5925, -142.5, 95.204, 852)
femmesh.addNode(-112.689, -142.5, 65.5654, 853)
femmesh.addNode(-112.689, -142.5, 90.5654, 854)
femmesh.addNode(-56.0529, -142.5, 120.76, 855)
femmesh.addNode(-56.2212, -142.5, 146.66, 856)
femmesh.addNode(-84.0978, -142.5, 156.995, 857)
femmesh.addNode(-113.149, -142.5, 116.121, 858)
femmesh.addNode(-113.149, -142.5, 141.121, 859)
femmesh.addNode(-0.839004, -142.5, 45.0483, 860)
femmesh.addNode(-27.4523, -142.5, 80.3389, 861)
femmesh.addNode(-0.370743, -142.5, 67.9707, 862)
femmesh.addNode(25.3878, -142.5, 61.6182, 863)
femmesh.addNode(52.4428, -142.5, 47.0528, 864)
femmesh.addNode(-28.4754, -142.5, 135.177, 865)
femmesh.addNode(-1.22553, -142.5, 96.9088, 866)
femmesh.addNode(-1.07434, -142.5, 123.353, 867)
femmesh.addNode(-1.24264, -142.5, 149.252, 868)
femmesh.addNode(-56.5203, -142.5, 171.411, 869)
femmesh.addNode(-28.9526, -142.5, 186.002, 870)
femmesh.addNode(-1.27977, -142.5, 174.791, 871)
femmesh.addNode(26.0072, -142.5, 110.985, 872)
femmesh.addNode(52.9111, -142.5, 69.9752, 873)
femmesh.addNode(54.5011, -142.5, 100.092, 874)
femmesh.addNode(26.1213, -142.5, 162.967, 875)
femmesh.addNode(54.6523, -142.5, 126.536, 876)
femmesh.addNode(54.938, -142.5, 152.749, 877)
femmesh.addNode(-84.3363, -142.5, 206.916, 878)
femmesh.addNode(-56.8292, -142.5, 196.337, 879)
femmesh.addNode(-113.448, -142.5, 165.873, 880)
femmesh.addNode(-113.448, -142.5, 190.873, 881)
femmesh.addNode(-84.2793, -142.5, 257.059, 882)
femmesh.addNode(-56.7687, -142.5, 221.506, 883)
femmesh.addNode(-56.8363, -142.5, 246.472, 884)
femmesh.addNode(-113.388, -142.5, 216.043, 885)
femmesh.addNode(-113.388, -142.5, 241.043, 886)
femmesh.addNode(-84.2082, -142.5, 307.341, 887)
femmesh.addNode(-56.8397, -142.5, 271.445, 888)
femmesh.addNode(-56.7292, -142.5, 296.711, 889)
femmesh.addNode(-113.391, -142.5, 266.016, 890)
femmesh.addNode(-113.391, -142.5, 291.016, 891)
femmesh.addNode(-56.6547, -142.5, 322.021, 892)
femmesh.addNode(-56.4206, -142.5, 347.274, 893)
femmesh.addNode(-84.3011, -142.5, 357.304, 894)
femmesh.addNode(-113.317, -142.5, 316.326, 895)
femmesh.addNode(-113.317, -142.5, 341.326, 896)
femmesh.addNode(-29.3292, -142.5, 235.893, 897)
femmesh.addNode(-1.58871, -142.5, 199.717, 898)
femmesh.addNode(-1.66506, -142.5, 224.804, 899)
femmesh.addNode(-1.73262, -142.5, 249.769, 900)
femmesh.addNode(-29.2862, -142.5, 286.125, 901)
femmesh.addNode(-1.72536, -142.5, 274.855, 902)
femmesh.addNode(26.0078, -142.5, 213.593, 903)
femmesh.addNode(54.9009, -142.5, 178.287, 904)
femmesh.addNode(54.9259, -142.5, 203.593, 905)
femmesh.addNode(25.9387, -142.5, 263.766, 906)
femmesh.addNode(54.8496, -142.5, 228.68, 907)
femmesh.addNode(54.8433, -142.5, 253.763, 908)
femmesh.addNode(-28.9416, -142.5, 336.644, 909)
femmesh.addNode(-1.61485, -142.5, 300.122, 910)
femmesh.addNode(-1.44341, -142.5, 325.522, 911)
femmesh.addNode(-1.20936, -142.5, 350.775, 912)
femmesh.addNode(-56.588, -142.5, 371.927, 913)
femmesh.addNode(-28.8964, -142.5, 386.688, 914)
femmesh.addNode(-1.09664, -142.5, 375.838, 915)
femmesh.addNode(26.1174, -142.5, 314.253, 916)
femmesh.addNode(54.8505, -142.5, 278.848, 917)
femmesh.addNode(54.8952, -142.5, 304.076, 918)
femmesh.addNode(55.0667, -142.5, 329.477, 919)
femmesh.addNode(55.0604, -142.5, 354.573, 920)
femmesh.addNode(26.4016, -142.5, 364.716, 921)
femmesh.addNode(81.5561, -142.5, 85.5268, 922)
femmesh.addNode(83.4319, -142.5, 141.856, 923)
femmesh.addNode(83.7427, -142.5, 193.374, 924)
femmesh.addNode(83.7614, -142.5, 243.762, 925)
femmesh.addNode(83.7998, -142.5, 294.073, 926)
femmesh.addNode(83.8382, -142.5, 344.397, 927)
femmesh.addNode(55.1731, -142.5, 379.636, 928)
femmesh.addNode(83.865, -142.5, 394.367, 929)
femmesh.addNode(-84.3807, -142.5, 407.041, 930)
femmesh.addNode(-56.7768, -142.5, 396.717, 931)
femmesh.addNode(-113.484, -142.5, 365.978, 932)
femmesh.addNode(-113.484, -142.5, 390.978, 933)
femmesh.addNode(-84.2898, -142.5, 457.083, 934)
femmesh.addNode(-56.6891, -142.5, 421.803, 935)
femmesh.addNode(-56.8156, -142.5, 446.574, 936)
femmesh.addNode(-113.396, -142.5, 416.063, 937)
femmesh.addNode(-113.396, -142.5, 441.063, 938)
femmesh.addNode(-84.2106, -142.5, 507.346, 939)
femmesh.addNode(-56.8125, -142.5, 471.53, 940)
femmesh.addNode(-56.7241, -142.5, 496.735, 941)
femmesh.addNode(-113.393, -142.5, 491.02, 942)
femmesh.addNode(-113.393, -142.5, 466.02, 943)
femmesh.addNode(-56.6481, -142.5, 522.042, 944)
femmesh.addNode(-56.5108, -142.5, 547.13, 945)
femmesh.addNode(-84.3166, -142.5, 557.28, 946)
femmesh.addNode(-113.317, -142.5, 541.326, 947)
femmesh.addNode(-113.317, -142.5, 516.326, 948)
femmesh.addNode(-29.2117, -142.5, 436.25, 949)
femmesh.addNode(-1.28543, -142.5, 400.628, 950)
femmesh.addNode(-1.4938, -142.5, 425.323, 951)
femmesh.addNode(-1.62033, -142.5, 450.095, 952)
femmesh.addNode(-29.2499, -142.5, 486.226, 953)
femmesh.addNode(-1.66847, -142.5, 475.013, 954)
femmesh.addNode(26.3059, -142.5, 414.473, 955)
femmesh.addNode(55.2062, -142.5, 404.509, 956)
femmesh.addNode(26.0494, -142.5, 464.087, 957)
femmesh.addNode(54.9978, -142.5, 429.204, 958)
femmesh.addNode(54.9571, -142.5, 454.11, 959)
femmesh.addNode(-29.0243, -142.5, 536.519, 960)
femmesh.addNode(-1.58012, -142.5, 500.218, 961)
femmesh.addNode(-1.54736, -142.5, 525.264, 962)
femmesh.addNode(-1.4101, -142.5, 550.352, 963)
femmesh.addNode(-56.6928, -142.5, 571.757, 964)
femmesh.addNode(-29.1104, -142.5, 586.339, 965)
femmesh.addNode(-1.43386, -142.5, 575.302, 966)
femmesh.addNode(26.0341, -142.5, 514.051, 967)
femmesh.addNode(54.9089, -142.5, 479.029, 968)
femmesh.addNode(54.902, -142.5, 504.012, 969)
femmesh.addNode(26.0431, -142.5, 564.047, 970)
femmesh.addNode(54.9347, -142.5, 529.058, 971)
femmesh.addNode(54.9336, -142.5, 554.052, 972)
femmesh.addNode(-84.3983, -142.5, 607.013, 973)
femmesh.addNode(-56.9161, -142.5, 596.49, 974)
femmesh.addNode(-113.499, -142.5, 565.954, 975)
femmesh.addNode(-113.499, -142.5, 590.954, 976)
femmesh.addNode(-84.2927, -142.5, 657.078, 977)
femmesh.addNode(-56.8158, -142.5, 621.595, 978)
femmesh.addNode(-56.857, -142.5, 646.506, 979)
femmesh.addNode(-113.399, -142.5, 616.059, 980)
femmesh.addNode(-113.399, -142.5, 641.059, 981)
femmesh.addNode(-84.2111, -142.5, 707.345, 982)
femmesh.addNode(-56.8518, -142.5, 671.466, 983)
femmesh.addNode(-56.734, -142.5, 696.719, 984)
femmesh.addNode(-113.394, -142.5, 666.019, 985)
femmesh.addNode(-113.394, -142.5, 691.019, 986)
femmesh.addNode(-56.6577, -142.5, 722.026, 987)
femmesh.addNode(-56.4217, -142.5, 747.276, 988)
femmesh.addNode(-84.3018, -142.5, 757.304, 989)
femmesh.addNode(-113.317, -142.5, 716.326, 990)
femmesh.addNode(-113.317, -142.5, 741.326, 991)
femmesh.addNode(-29.3749, -142.5, 635.983, 992)
femmesh.addNode(-1.65719, -142.5, 600.035, 993)
femmesh.addNode(-1.71503, -142.5, 624.943, 994)
femmesh.addNode(-1.75623, -142.5, 649.854, 995)
femmesh.addNode(-29.2983, -142.5, 686.147, 996)
femmesh.addNode(-1.73983, -142.5, 674.89, 997)
femmesh.addNode(25.9615, -142.5, 613.906, 998)
femmesh.addNode(54.9098, -142.5, 579.003, 999)
femmesh.addNode(54.8924, -142.5, 603.971, 1000)
femmesh.addNode(25.92, -142.5, 663.85, 1001)
femmesh.addNode(54.8345, -142.5, 628.879, 1002)
femmesh.addNode(54.8259, -142.5, 653.866, 1003)
femmesh.addNode(-28.9447, -142.5, 736.649, 1004)
femmesh.addNode(-1.62209, -142.5, 700.143, 1005)
femmesh.addNode(-1.44731, -142.5, 725.531, 1006)
femmesh.addNode(-1.2114, -142.5, 750.781, 1007)
femmesh.addNode(-56.5888, -142.5, 771.928, 1008)
femmesh.addNode(-28.8971, -142.5, 786.689, 1009)
femmesh.addNode(-1.09769, -142.5, 775.84, 1010)
femmesh.addNode(26.1112, -142.5, 714.274, 1011)
femmesh.addNode(54.8423, -142.5, 678.902, 1012)
femmesh.addNode(54.8888, -142.5, 704.103, 1013)
femmesh.addNode(55.0636, -142.5, 729.491, 1014)
femmesh.addNode(55.0584, -142.5, 754.58, 1015)
femmesh.addNode(26.3997, -142.5, 764.721, 1016)
femmesh.addNode(83.8573, -142.5, 444.146, 1017)
femmesh.addNode(83.8096, -142.5, 494.035, 1018)
femmesh.addNode(83.8015, -142.5, 544.014, 1019)
femmesh.addNode(83.7829, -142.5, 593.976, 1020)
femmesh.addNode(83.7569, -142.5, 643.931, 1021)
femmesh.addNode(83.7947, -142.5, 694.119, 1022)
femmesh.addNode(83.836, -142.5, 744.409, 1023)
femmesh.addNode(55.1721, -142.5, 779.639, 1024)
femmesh.addNode(83.8677, -142.5, 794.387, 1025)
femmesh.addNode(-84.3809, -142.5, 807.042, 1026)
femmesh.addNode(-56.7772, -142.5, 796.718, 1027)
femmesh.addNode(-113.484, -142.5, 765.978, 1028)
femmesh.addNode(-113.484, -142.5, 790.978, 1029)
femmesh.addNode(-84.0997, -142.5, 858.051, 1030)
femmesh.addNode(-56.6892, -142.5, 821.803, 1031)
femmesh.addNode(-56.7523, -142.5, 846.897, 1032)
femmesh.addNode(-113.396, -142.5, 816.063, 1033)
femmesh.addNode(-113.396, -142.5, 841.063, 1034)
femmesh.addNode(-82.2539, -142.5, 914.162, 1035)
femmesh.addNode(-56.559, -142.5, 872.821, 1036)
femmesh.addNode(-56.1791, -142.5, 899.218, 1037)
femmesh.addNode(-113.203, -142.5, 866.988, 1038)
femmesh.addNode(-113.203, -142.5, 891.988, 1039)
femmesh.addNode(-54.5266, -142.5, 929.404, 1040)
femmesh.addNode(-53.525, -142.5, 952.773, 1041)
femmesh.addNode(-111.551, -142.5, 922.174, 1042)
femmesh.addNode(-111.551, -142.5, 947.174, 1043)
femmesh.addNode(-29.1485, -142.5, 836.573, 1044)
femmesh.addNode(-1.28606, -142.5, 800.63, 1045)
femmesh.addNode(-1.47824, -142.5, 825.404, 1046)
femmesh.addNode(-1.54127, -142.5, 850.498, 1047)
femmesh.addNode(-28.8317, -142.5, 888.064, 1048)
femmesh.addNode(-1.55582, -142.5, 876.507, 1049)
femmesh.addNode(26.3212, -142.5, 814.555, 1050)
femmesh.addNode(55.2089, -142.5, 804.528, 1051)
femmesh.addNode(26.1145, -142.5, 865.339, 1052)
femmesh.addNode(55.0167, -142.5, 829.303, 1053)
femmesh.addNode(54.9815, -142.5, 854.527, 1054)
femmesh.addNode(-27.4503, -142.5, 937.829, 1055)
femmesh.addNode(-1.17596, -142.5, 902.904, 1056)
femmesh.addNode(-1.45404, -142.5, 932.124, 1057)
femmesh.addNode(-0.452457, -142.5, 955.493, 1058)
femmesh.addNode(25.8218, -142.5, 920.568, 1059)
femmesh.addNode(54.967, -142.5, 880.536, 1060)
femmesh.addNode(54.6094, -142.5, 906.005, 1061)
femmesh.addNode(54.3314, -142.5, 935.226, 1062)
femmesh.addNode(52.1382, -142.5, 956.939, 1063)
femmesh.addNode(83.8692, -142.5, 844.501, 1064)
femmesh.addNode(83.4765, -142.5, 895.194, 1065)
femmesh.addNode(80.9258, -142.5, 942.377, 1066)
femmesh.addNode(142.5, 118.75, 0.0, 1067)
femmesh.addNode(142.5, 142.5, 25.0, 1068)
femmesh.addNode(142.5, 118.75, 1000.0, 1069)
femmesh.addNode(142.5, 142.5, 975.0, 1070)
femmesh.addNode(142.5, -71.25, 0.0, 1071)
femmesh.addNode(142.5, -86.6401, 18.8035, 1072)
femmesh.addNode(142.5, -118.75, 0.0, 1073)
femmesh.addNode(142.5, -118.75, 25.0, 1074)
femmesh.addNode(142.5, -23.75, 0.0, 1075)
femmesh.addNode(142.5, -62.8901, 18.8035, 1076)
femmesh.addNode(142.5, -37.0488, 25.7004, 1077)
femmesh.addNode(142.5, 23.75, 0.0, 1078)
femmesh.addNode(142.5, -13.2988, 25.7004, 1079)
femmesh.addNode(142.5, 12.4598, 19.3479, 1080)
femmesh.addNode(142.5, 71.25, 0.0, 1081)
femmesh.addNode(142.5, 36.2098, 19.3479, 1082)
femmesh.addNode(142.5, 63.7331, 27.7049, 1083)
femmesh.addNode(142.5, 118.75, 25.0, 1084)
femmesh.addNode(142.5, 87.4831, 27.7049, 1085)
femmesh.addNode(142.5, -71.25, 1000.0, 1086)
femmesh.addNode(142.5, -87.8006, 972.174, 1087)
femmesh.addNode(142.5, -118.75, 975.0, 1088)
femmesh.addNode(142.5, -118.75, 1000.0, 1089)
femmesh.addNode(142.5, -23.75, 1000.0, 1090)
femmesh.addNode(142.5, -64.0506, 972.174, 1091)
femmesh.addNode(142.5, -36.9743, 980.599, 1092)
femmesh.addNode(142.5, 23.75, 1000.0, 1093)
femmesh.addNode(142.5, -13.2243, 980.599, 1094)
femmesh.addNode(142.5, 12.7719, 974.894, 1095)
femmesh.addNode(142.5, 71.25, 1000.0, 1096)
femmesh.addNode(142.5, 36.5219, 974.894, 1097)
femmesh.addNode(142.5, 63.1163, 982.045, 1098)
femmesh.addNode(142.5, 118.75, 975.0, 1099)
femmesh.addNode(142.5, 86.8663, 982.045, 1100)
femmesh.addNode(142.5, 142.5, 75.0, 1101)
femmesh.addNode(142.5, 111.233, 52.7049, 1102)
femmesh.addNode(142.5, 142.5, 125.0, 1103)
femmesh.addNode(142.5, 111.233, 77.7049, 1104)
femmesh.addNode(142.5, 112.823, 107.822, 1105)
femmesh.addNode(142.5, 142.5, 175.0, 1106)
femmesh.addNode(142.5, 112.823, 132.822, 1107)
femmesh.addNode(142.5, 113.109, 159.034, 1108)
femmesh.addNode(142.5, 142.5, 225.0, 1109)
femmesh.addNode(142.5, 113.109, 184.034, 1110)
femmesh.addNode(142.5, 113.134, 209.34, 1111)
femmesh.addNode(142.5, 142.5, 275.0, 1112)
femmesh.addNode(142.5, 113.134, 234.34, 1113)
femmesh.addNode(142.5, 113.128, 259.422, 1114)
femmesh.addNode(142.5, 142.5, 325.0, 1115)
femmesh.addNode(142.5, 113.128, 284.422, 1116)
femmesh.addNode(142.5, 113.172, 309.65, 1117)
femmesh.addNode(142.5, 142.5, 375.0, 1118)
femmesh.addNode(142.5, 113.172, 334.65, 1119)
femmesh.addNode(142.5, 113.166, 359.747, 1120)
femmesh.addNode(142.5, 142.5, 425.0, 1121)
femmesh.addNode(142.5, 113.166, 384.747, 1122)
femmesh.addNode(142.5, 113.199, 409.62, 1123)
femmesh.addNode(142.5, 142.5, 475.0, 1124)
femmesh.addNode(142.5, 113.199, 434.62, 1125)
femmesh.addNode(142.5, 113.158, 459.526, 1126)
femmesh.addNode(142.5, 142.5, 525.0, 1127)
femmesh.addNode(142.5, 113.158, 484.526, 1128)
femmesh.addNode(142.5, 113.151, 509.51, 1129)
femmesh.addNode(142.5, 142.5, 575.0, 1130)
femmesh.addNode(142.5, 113.151, 534.51, 1131)
femmesh.addNode(142.5, 113.15, 559.504, 1132)
femmesh.addNode(142.5, 142.5, 625.0, 1133)
femmesh.addNode(142.5, 113.15, 584.504, 1134)
femmesh.addNode(142.5, 113.133, 609.472, 1135)
femmesh.addNode(142.5, 142.5, 675.0, 1136)
femmesh.addNode(142.5, 113.133, 634.472, 1137)
femmesh.addNode(142.5, 113.124, 659.459, 1138)
femmesh.addNode(142.5, 142.5, 725.0, 1139)
femmesh.addNode(142.5, 113.124, 684.459, 1140)
femmesh.addNode(142.5, 113.171, 709.66, 1141)
femmesh.addNode(142.5, 142.5, 775.0, 1142)
femmesh.addNode(142.5, 113.171, 734.66, 1143)
femmesh.addNode(142.5, 113.165, 759.749, 1144)
femmesh.addNode(142.5, 142.5, 825.0, 1145)
femmesh.addNode(142.5, 113.165, 784.749, 1146)
femmesh.addNode(142.5, 113.202, 809.638, 1147)
femmesh.addNode(142.5, 142.5, 875.0, 1148)
femmesh.addNode(142.5, 113.202, 834.638, 1149)
femmesh.addNode(142.5, 113.167, 859.862, 1150)
femmesh.addNode(142.5, 142.5, 925.0, 1151)
femmesh.addNode(142.5, 113.167, 884.862, 1152)
femmesh.addNode(142.5, 112.809, 910.331, 1153)
femmesh.addNode(142.5, 112.809, 935.331, 1154)
femmesh.addNode(142.5, 110.616, 957.045, 1155)
femmesh.addNode(142.5, -80.579, 59.369, 1156)
femmesh.addNode(142.5, -52.4388, 44.5039, 1157)
femmesh.addNode(142.5, -110.39, 43.8035, 1158)
femmesh.addNode(142.5, -83.3383, 106.687, 1159)
femmesh.addNode(142.5, -54.7377, 66.2659, 1160)
femmesh.addNode(142.5, -55.5925, 95.204, 1161)
femmesh.addNode(142.5, -112.689, 65.5654, 1162)
femmesh.addNode(142.5, -112.689, 90.5654, 1163)
femmesh.addNode(142.5, -56.0529, 120.76, 1164)
femmesh.addNode(142.5, -56.2212, 146.66, 1165)
femmesh.addNode(142.5, -84.0978, 156.995, 1166)
femmesh.addNode(142.5, -113.149, 116.121, 1167)
femmesh.addNode(142.5, -113.149, 141.121, 1168)
femmesh.addNode(142.5, -0.839004, 45.0483, 1169)
femmesh.addNode(142.5, -27.4523, 80.3389, 1170)
femmesh.addNode(142.5, -0.370743, 67.9707, 1171)
femmesh.addNode(142.5, 25.3878, 61.6182, 1172)
femmesh.addNode(142.5, 52.4428, 47.0528, 1173)
femmesh.addNode(142.5, -28.4754, 135.177, 1174)
femmesh.addNode(142.5, -1.22553, 96.9088, 1175)
femmesh.addNode(142.5, -1.07434, 123.353, 1176)
femmesh.addNode(142.5, -1.24264, 149.252, 1177)
femmesh.addNode(142.5, -56.5203, 171.411, 1178)
femmesh.addNode(142.5, -28.9526, 186.002, 1179)
femmesh.addNode(142.5, -1.27977, 174.791, 1180)
femmesh.addNode(142.5, 26.0072, 110.985, 1181)
femmesh.addNode(142.5, 52.9111, 69.9752, 1182)
femmesh.addNode(142.5, 54.5011, 100.092, 1183)
femmesh.addNode(142.5, 26.1213, 162.967, 1184)
femmesh.addNode(142.5, 54.6523, 126.536, 1185)
femmesh.addNode(142.5, 54.938, 152.749, 1186)
femmesh.addNode(142.5, -84.3363, 206.916, 1187)
femmesh.addNode(142.5, -56.8292, 196.337, 1188)
femmesh.addNode(142.5, -113.448, 165.873, 1189)
femmesh.addNode(142.5, -113.448, 190.873, 1190)
femmesh.addNode(142.5, -84.2793, 257.059, 1191)
femmesh.addNode(142.5, -56.7687, 221.506, 1192)
femmesh.addNode(142.5, -56.8363, 246.472, 1193)
femmesh.addNode(142.5, -113.388, 216.043, 1194)
femmesh.addNode(142.5, -113.388, 241.043, 1195)
femmesh.addNode(142.5, -84.2082, 307.341, 1196)
femmesh.addNode(142.5, -56.8397, 271.445, 1197)
femmesh.addNode(142.5, -56.7292, 296.711, 1198)
femmesh.addNode(142.5, -113.391, 266.016, 1199)
femmesh.addNode(142.5, -113.391, 291.016, 1200)
femmesh.addNode(142.5, -56.6547, 322.021, 1201)
femmesh.addNode(142.5, -56.4206, 347.274, 1202)
femmesh.addNode(142.5, -84.3011, 357.304, 1203)
femmesh.addNode(142.5, -113.317, 316.326, 1204)
femmesh.addNode(142.5, -113.317, 341.326, 1205)
femmesh.addNode(142.5, -29.3292, 235.893, 1206)
femmesh.addNode(142.5, -1.58871, 199.717, 1207)
femmesh.addNode(142.5, -1.66506, 224.804, 1208)
femmesh.addNode(142.5, -1.73262, 249.769, 1209)
femmesh.addNode(142.5, -29.2862, 286.125, 1210)
femmesh.addNode(142.5, -1.72536, 274.855, 1211)
femmesh.addNode(142.5, 26.0078, 213.593, 1212)
femmesh.addNode(142.5, 54.9009, 178.287, 1213)
femmesh.addNode(142.5, 54.9259, 203.593, 1214)
femmesh.addNode(142.5, 25.9387, 263.766, 1215)
femmesh.addNode(142.5, 54.8496, 228.68, 1216)
femmesh.addNode(142.5, 54.8433, 253.763, 1217)
femmesh.addNode(142.5, -28.9416, 336.644, 1218)
femmesh.addNode(142.5, -1.61485, 300.122, 1219)
femmesh.addNode(142.5, -1.44341, 325.522, 1220)
femmesh.addNode(142.5, -1.20936, 350.775, 1221)
femmesh.addNode(142.5, -56.588, 371.927, 1222)
femmesh.addNode(142.5, -28.8964, 386.688, 1223)
femmesh.addNode(142.5, -1.09664, 375.838, 1224)
femmesh.addNode(142.5, 26.1174, 314.253, 1225)
femmesh.addNode(142.5, 54.8505, 278.848, 1226)
femmesh.addNode(142.5, 54.8952, 304.076, 1227)
femmesh.addNode(142.5, 55.0667, 329.477, 1228)
femmesh.addNode(142.5, 55.0604, 354.573, 1229)
femmesh.addNode(142.5, 26.4016, 364.716, 1230)
femmesh.addNode(142.5, 81.5561, 85.5268, 1231)
femmesh.addNode(142.5, 83.4319, 141.856, 1232)
femmesh.addNode(142.5, 83.7427, 193.374, 1233)
femmesh.addNode(142.5, 83.7614, 243.762, 1234)
femmesh.addNode(142.5, 83.7998, 294.073, 1235)
femmesh.addNode(142.5, 83.8382, 344.397, 1236)
femmesh.addNode(142.5, 55.1731, 379.636, 1237)
femmesh.addNode(142.5, 83.865, 394.367, 1238)
femmesh.addNode(142.5, -84.3807, 407.041, 1239)
femmesh.addNode(142.5, -56.7768, 396.717, 1240)
femmesh.addNode(142.5, -113.484, 365.978, 1241)
femmesh.addNode(142.5, -113.484, 390.978, 1242)
femmesh.addNode(142.5, -84.2898, 457.083, 1243)
femmesh.addNode(142.5, -56.6891, 421.803, 1244)
femmesh.addNode(142.5, -56.8156, 446.574, 1245)
femmesh.addNode(142.5, -113.396, 416.063, 1246)
femmesh.addNode(142.5, -113.396, 441.063, 1247)
femmesh.addNode(142.5, -84.2106, 507.346, 1248)
femmesh.addNode(142.5, -56.8125, 471.53, 1249)
femmesh.addNode(142.5, -56.7241, 496.735, 1250)
femmesh.addNode(142.5, -113.393, 491.02, 1251)
femmesh.addNode(142.5, -113.393, 466.02, 1252)
femmesh.addNode(142.5, -56.6481, 522.042, 1253)
femmesh.addNode(142.5, -56.5108, 547.13, 1254)
femmesh.addNode(142.5, -84.3166, 557.28, 1255)
femmesh.addNode(142.5, -113.317, 541.326, 1256)
femmesh.addNode(142.5, -113.317, 516.326, 1257)
femmesh.addNode(142.5, -29.2117, 436.25, 1258)
femmesh.addNode(142.5, -1.28543, 400.628, 1259)
femmesh.addNode(142.5, -1.4938, 425.323, 1260)
femmesh.addNode(142.5, -1.62033, 450.095, 1261)
femmesh.addNode(142.5, -29.2499, 486.226, 1262)
femmesh.addNode(142.5, -1.66847, 475.013, 1263)
femmesh.addNode(142.5, 26.3059, 414.473, 1264)
femmesh.addNode(142.5, 55.2062, 404.509, 1265)
femmesh.addNode(142.5, 26.0494, 464.087, 1266)
femmesh.addNode(142.5, 54.9978, 429.204, 1267)
femmesh.addNode(142.5, 54.9571, 454.11, 1268)
femmesh.addNode(142.5, -29.0243, 536.519, 1269)
femmesh.addNode(142.5, -1.58012, 500.218, 1270)
femmesh.addNode(142.5, -1.54736, 525.264, 1271)
femmesh.addNode(142.5, -1.4101, 550.352, 1272)
femmesh.addNode(142.5, -56.6928, 571.757, 1273)
femmesh.addNode(142.5, -29.1104, 586.339, 1274)
femmesh.addNode(142.5, -1.43386, 575.302, 1275)
femmesh.addNode(142.5, 26.0341, 514.051, 1276)
femmesh.addNode(142.5, 54.9089, 479.029, 1277)
femmesh.addNode(142.5, 54.902, 504.012, 1278)
femmesh.addNode(142.5, 26.0431, 564.047, 1279)
femmesh.addNode(142.5, 54.9347, 529.058, 1280)
femmesh.addNode(142.5, 54.9336, 554.052, 1281)
femmesh.addNode(142.5, -84.3983, 607.013, 1282)
femmesh.addNode(142.5, -56.9161, 596.49, 1283)
femmesh.addNode(142.5, -113.499, 565.954, 1284)
femmesh.addNode(142.5, -113.499, 590.954, 1285)
femmesh.addNode(142.5, -84.2927, 657.078, 1286)
femmesh.addNode(142.5, -56.8158, 621.595, 1287)
femmesh.addNode(142.5, -56.857, 646.506, 1288)
femmesh.addNode(142.5, -113.399, 616.059, 1289)
femmesh.addNode(142.5, -113.399, 641.059, 1290)
femmesh.addNode(142.5, -84.2111, 707.345, 1291)
femmesh.addNode(142.5, -56.8518, 671.466, 1292)
femmesh.addNode(142.5, -56.734, 696.719, 1293)
femmesh.addNode(142.5, -113.394, 666.019, 1294)
femmesh.addNode(142.5, -113.394, 691.019, 1295)
femmesh.addNode(142.5, -56.6577, 722.026, 1296)
femmesh.addNode(142.5, -56.4217, 747.276, 1297)
femmesh.addNode(142.5, -84.3018, 757.304, 1298)
femmesh.addNode(142.5, -113.317, 716.326, 1299)
femmesh.addNode(142.5, -113.317, 741.326, 1300)
femmesh.addNode(142.5, -29.3749, 635.983, 1301)
femmesh.addNode(142.5, -1.65719, 600.035, 1302)
femmesh.addNode(142.5, -1.71503, 624.943, 1303)
femmesh.addNode(142.5, -1.75623, 649.854, 1304)
femmesh.addNode(142.5, -29.2983, 686.147, 1305)
femmesh.addNode(142.5, -1.73983, 674.89, 1306)
femmesh.addNode(142.5, 25.9615, 613.906, 1307)
femmesh.addNode(142.5, 54.9098, 579.003, 1308)
femmesh.addNode(142.5, 54.8924, 603.971, 1309)
femmesh.addNode(142.5, 25.92, 663.85, 1310)
femmesh.addNode(142.5, 54.8345, 628.879, 1311)
femmesh.addNode(142.5, 54.8259, 653.866, 1312)
femmesh.addNode(142.5, -28.9447, 736.649, 1313)
femmesh.addNode(142.5, -1.62209, 700.143, 1314)
femmesh.addNode(142.5, -1.44731, 725.531, 1315)
femmesh.addNode(142.5, -1.2114, 750.781, 1316)
femmesh.addNode(142.5, -56.5888, 771.928, 1317)
femmesh.addNode(142.5, -28.8971, 786.689, 1318)
femmesh.addNode(142.5, -1.09769, 775.84, 1319)
femmesh.addNode(142.5, 26.1112, 714.274, 1320)
femmesh.addNode(142.5, 54.8423, 678.902, 1321)
femmesh.addNode(142.5, 54.8888, 704.103, 1322)
femmesh.addNode(142.5, 55.0636, 729.491, 1323)
femmesh.addNode(142.5, 55.0584, 754.58, 1324)
femmesh.addNode(142.5, 26.3997, 764.721, 1325)
femmesh.addNode(142.5, 83.8573, 444.146, 1326)
femmesh.addNode(142.5, 83.8096, 494.035, 1327)
femmesh.addNode(142.5, 83.8015, 544.014, 1328)
femmesh.addNode(142.5, 83.7829, 593.976, 1329)
femmesh.addNode(142.5, 83.7569, 643.931, 1330)
femmesh.addNode(142.5, 83.7947, 694.119, 1331)
femmesh.addNode(142.5, 83.836, 744.409, 1332)
femmesh.addNode(142.5, 55.1721, 779.639, 1333)
femmesh.addNode(142.5, 83.8677, 794.387, 1334)
femmesh.addNode(142.5, -84.3809, 807.042, 1335)
femmesh.addNode(142.5, -56.7772, 796.718, 1336)
femmesh.addNode(142.5, -113.484, 765.978, 1337)
femmesh.addNode(142.5, -113.484, 790.978, 1338)
femmesh.addNode(142.5, -84.0997, 858.051, 1339)
femmesh.addNode(142.5, -56.6892, 821.803, 1340)
femmesh.addNode(142.5, -56.7523, 846.897, 1341)
femmesh.addNode(142.5, -113.396, 816.063, 1342)
femmesh.addNode(142.5, -113.396, 841.063, 1343)
femmesh.addNode(142.5, -82.2539, 914.162, 1344)
femmesh.addNode(142.5, -56.559, 872.821, 1345)
femmesh.addNode(142.5, -56.1791, 899.218, 1346)
femmesh.addNode(142.5, -113.203, 866.988, 1347)
femmesh.addNode(142.5, -113.203, 891.988, 1348)
femmesh.addNode(142.5, -54.5266, 929.404, 1349)
femmesh.addNode(142.5, -53.525, 952.773, 1350)
femmesh.addNode(142.5, -111.551, 922.174, 1351)
femmesh.addNode(142.5, -111.551, 947.174, 1352)
femmesh.addNode(142.5, -29.1485, 836.573, 1353)
femmesh.addNode(142.5, -1.28606, 800.63, 1354)
femmesh.addNode(142.5, -1.47824, 825.404, 1355)
femmesh.addNode(142.5, -1.54127, 850.498, 1356)
femmesh.addNode(142.5, -28.8317, 888.064, 1357)
femmesh.addNode(142.5, -1.55582, 876.507, 1358)
femmesh.addNode(142.5, 26.3212, 814.555, 1359)
femmesh.addNode(142.5, 55.2089, 804.528, 1360)
femmesh.addNode(142.5, 26.1145, 865.339, 1361)
femmesh.addNode(142.5, 55.0167, 829.303, 1362)
femmesh.addNode(142.5, 54.9815, 854.527, 1363)
femmesh.addNode(142.5, -27.4503, 937.829, 1364)
femmesh.addNode(142.5, -1.17596, 902.904, 1365)
femmesh.addNode(142.5, -1.45404, 932.124, 1366)
femmesh.addNode(142.5, -0.452457, 955.493, 1367)
femmesh.addNode(142.5, 25.8218, 920.568, 1368)
femmesh.addNode(142.5, 54.967, 880.536, 1369)
femmesh.addNode(142.5, 54.6094, 906.005, 1370)
femmesh.addNode(142.5, 54.3314, 935.226, 1371)
femmesh.addNode(142.5, 52.1382, 956.939, 1372)
femmesh.addNode(142.5, 83.8692, 844.501, 1373)
femmesh.addNode(142.5, 83.4765, 895.194, 1374)
femmesh.addNode(142.5, 80.9258, 942.377, 1375)
femmesh.addNode(71.25, 142.5, 0.0, 1376)
femmesh.addNode(86.6401, 142.5, 18.8035, 1377)
femmesh.addNode(118.75, 142.5, 0.0, 1378)
femmesh.addNode(118.75, 142.5, 25.0, 1379)
femmesh.addNode(23.75, 142.5, 0.0, 1380)
femmesh.addNode(62.8901, 142.5, 18.8035, 1381)
femmesh.addNode(37.0488, 142.5, 25.7004, 1382)
femmesh.addNode(-23.75, 142.5, 0.0, 1383)
femmesh.addNode(13.2988, 142.5, 25.7004, 1384)
femmesh.addNode(-12.4598, 142.5, 19.3479, 1385)
femmesh.addNode(-71.25, 142.5, 0.0, 1386)
femmesh.addNode(-36.2098, 142.5, 19.3479, 1387)
femmesh.addNode(-63.7331, 142.5, 27.7049, 1388)
femmesh.addNode(-87.4831, 142.5, 27.7049, 1389)
femmesh.addNode(-118.75, 142.5, 0.0, 1390)
femmesh.addNode(-118.75, 142.5, 25.0, 1391)
femmesh.addNode(71.25, 142.5, 1000.0, 1392)
femmesh.addNode(87.8006, 142.5, 972.174, 1393)
femmesh.addNode(118.75, 142.5, 975.0, 1394)
femmesh.addNode(118.75, 142.5, 1000.0, 1395)
femmesh.addNode(23.75, 142.5, 1000.0, 1396)
femmesh.addNode(64.0506, 142.5, 972.174, 1397)
femmesh.addNode(36.9743, 142.5, 980.599, 1398)
femmesh.addNode(-23.75, 142.5, 1000.0, 1399)
femmesh.addNode(13.2243, 142.5, 980.599, 1400)
femmesh.addNode(-12.7719, 142.5, 974.894, 1401)
femmesh.addNode(-71.25, 142.5, 1000.0, 1402)
femmesh.addNode(-36.5219, 142.5, 974.894, 1403)
femmesh.addNode(-63.1163, 142.5, 982.045, 1404)
femmesh.addNode(-86.8663, 142.5, 982.045, 1405)
femmesh.addNode(-118.75, 142.5, 975.0, 1406)
femmesh.addNode(-118.75, 142.5, 1000.0, 1407)
femmesh.addNode(80.579, 142.5, 59.369, 1408)
femmesh.addNode(52.4388, 142.5, 44.5039, 1409)
femmesh.addNode(110.39, 142.5, 43.8035, 1410)
femmesh.addNode(83.3383, 142.5, 106.687, 1411)
femmesh.addNode(54.7377, 142.5, 66.2659, 1412)
femmesh.addNode(55.5925, 142.5, 95.204, 1413)
femmesh.addNode(112.689, 142.5, 65.5654, 1414)
femmesh.addNode(112.689, 142.5, 90.5654, 1415)
femmesh.addNode(56.0529, 142.5, 120.76, 1416)
femmesh.addNode(56.2212, 142.5, 146.66, 1417)
femmesh.addNode(84.0978, 142.5, 156.995, 1418)
femmesh.addNode(113.149, 142.5, 116.121, 1419)
femmesh.addNode(113.149, 142.5, 141.121, 1420)
femmesh.addNode(0.839004, 142.5, 45.0483, 1421)
femmesh.addNode(27.4523, 142.5, 80.3389, 1422)
femmesh.addNode(0.370743, 142.5, 67.9707, 1423)
femmesh.addNode(-25.3878, 142.5, 61.6182, 1424)
femmesh.addNode(-52.4428, 142.5, 47.0528, 1425)
femmesh.addNode(28.4754, 142.5, 135.177, 1426)
femmesh.addNode(1.22553, 142.5, 96.9088, 1427)
femmesh.addNode(1.07434, 142.5, 123.353, 1428)
femmesh.addNode(1.24264, 142.5, 149.252, 1429)
femmesh.addNode(56.5203, 142.5, 171.411, 1430)
femmesh.addNode(28.9526, 142.5, 186.002, 1431)
femmesh.addNode(1.27977, 142.5, 174.791, 1432)
femmesh.addNode(-26.0072, 142.5, 110.985, 1433)
femmesh.addNode(-52.9111, 142.5, 69.9752, 1434)
femmesh.addNode(-54.5011, 142.5, 100.092, 1435)
femmesh.addNode(-26.1213, 142.5, 162.967, 1436)
femmesh.addNode(-54.6523, 142.5, 126.536, 1437)
femmesh.addNode(-54.938, 142.5, 152.749, 1438)
femmesh.addNode(84.3363, 142.5, 206.916, 1439)
femmesh.addNode(56.8292, 142.5, 196.337, 1440)
femmesh.addNode(113.448, 142.5, 165.873, 1441)
femmesh.addNode(113.448, 142.5, 190.873, 1442)
femmesh.addNode(84.2793, 142.5, 257.059, 1443)
femmesh.addNode(56.7687, 142.5, 221.506, 1444)
femmesh.addNode(56.8363, 142.5, 246.472, 1445)
femmesh.addNode(113.388, 142.5, 216.043, 1446)
femmesh.addNode(113.388, 142.5, 241.043, 1447)
femmesh.addNode(84.2082, 142.5, 307.341, 1448)
femmesh.addNode(56.8397, 142.5, 271.445, 1449)
femmesh.addNode(56.7292, 142.5, 296.711, 1450)
femmesh.addNode(113.391, 142.5, 266.016, 1451)
femmesh.addNode(113.391, 142.5, 291.016, 1452)
femmesh.addNode(56.6547, 142.5, 322.021, 1453)
femmesh.addNode(56.4206, 142.5, 347.274, 1454)
femmesh.addNode(84.3011, 142.5, 357.304, 1455)
femmesh.addNode(113.317, 142.5, 316.326, 1456)
femmesh.addNode(113.317, 142.5, 341.326, 1457)
femmesh.addNode(29.3292, 142.5, 235.893, 1458)
femmesh.addNode(1.58871, 142.5, 199.717, 1459)
femmesh.addNode(1.66506, 142.5, 224.804, 1460)
femmesh.addNode(1.73262, 142.5, 249.769, 1461)
femmesh.addNode(29.2862, 142.5, 286.125, 1462)
femmesh.addNode(1.72536, 142.5, 274.855, 1463)
femmesh.addNode(-26.0078, 142.5, 213.593, 1464)
femmesh.addNode(-54.9009, 142.5, 178.287, 1465)
femmesh.addNode(-54.9259, 142.5, 203.593, 1466)
femmesh.addNode(-25.9387, 142.5, 263.766, 1467)
femmesh.addNode(-54.8496, 142.5, 228.68, 1468)
femmesh.addNode(-54.8433, 142.5, 253.763, 1469)
femmesh.addNode(28.9416, 142.5, 336.644, 1470)
femmesh.addNode(1.61485, 142.5, 300.122, 1471)
femmesh.addNode(1.44341, 142.5, 325.522, 1472)
femmesh.addNode(1.20936, 142.5, 350.775, 1473)
femmesh.addNode(56.588, 142.5, 371.927, 1474)
femmesh.addNode(28.8964, 142.5, 386.688, 1475)
femmesh.addNode(1.09664, 142.5, 375.838, 1476)
femmesh.addNode(-26.1174, 142.5, 314.253, 1477)
femmesh.addNode(-54.8505, 142.5, 278.848, 1478)
femmesh.addNode(-54.8952, 142.5, 304.076, 1479)
femmesh.addNode(-55.0667, 142.5, 329.477, 1480)
femmesh.addNode(-55.0604, 142.5, 354.573, 1481)
femmesh.addNode(-26.4016, 142.5, 364.716, 1482)
femmesh.addNode(-81.5561, 142.5, 85.5268, 1483)
femmesh.addNode(-111.233, 142.5, 52.7049, 1484)
femmesh.addNode(-111.233, 142.5, 77.7049, 1485)
femmesh.addNode(-83.4319, 142.5, 141.856, 1486)
femmesh.addNode(-112.823, 142.5, 107.822, 1487)
femmesh.addNode(-112.823, 142.5, 132.822, 1488)
femmesh.addNode(-83.7427, 142.5, 193.374, 1489)
femmesh.addNode(-113.109, 142.5, 159.034, 1490)
femmesh.addNode(-113.109, 142.5, 184.034, 1491)
femmesh.addNode(-83.7614, 142.5, 243.762, 1492)
femmesh.addNode(-113.134, 142.5, 209.34, 1493)
femmesh.addNode(-113.134, 142.5, 234.34, 1494)
femmesh.addNode(-83.7998, 142.5, 294.073, 1495)
femmesh.addNode(-113.128, 142.5, 259.422, 1496)
femmesh.addNode(-113.128, 142.5, 284.422, 1497)
femmesh.addNode(-83.8382, 142.5, 344.397, 1498)
femmesh.addNode(-113.172, 142.5, 309.65, 1499)
femmesh.addNode(-113.172, 142.5, 334.65, 1500)
femmesh.addNode(-55.1731, 142.5, 379.636, 1501)
femmesh.addNode(-83.865, 142.5, 394.367, 1502)
femmesh.addNode(-113.166, 142.5, 359.747, 1503)
femmesh.addNode(-113.166, 142.5, 384.747, 1504)
femmesh.addNode(84.3807, 142.5, 407.041, 1505)
femmesh.addNode(56.7768, 142.5, 396.717, 1506)
femmesh.addNode(113.484, 142.5, 365.978, 1507)
femmesh.addNode(113.484, 142.5, 390.978, 1508)
femmesh.addNode(84.2898, 142.5, 457.083, 1509)
femmesh.addNode(56.6891, 142.5, 421.803, 1510)
femmesh.addNode(56.8156, 142.5, 446.574, 1511)
femmesh.addNode(113.396, 142.5, 416.063, 1512)
femmesh.addNode(113.396, 142.5, 441.063, 1513)
femmesh.addNode(84.2106, 142.5, 507.346, 1514)
femmesh.addNode(56.8125, 142.5, 471.53, 1515)
femmesh.addNode(56.7241, 142.5, 496.735, 1516)
femmesh.addNode(113.393, 142.5, 491.02, 1517)
femmesh.addNode(113.393, 142.5, 466.02, 1518)
femmesh.addNode(56.6481, 142.5, 522.042, 1519)
femmesh.addNode(56.5108, 142.5, 547.13, 1520)
femmesh.addNode(84.3166, 142.5, 557.28, 1521)
femmesh.addNode(113.317, 142.5, 541.326, 1522)
femmesh.addNode(113.317, 142.5, 516.326, 1523)
femmesh.addNode(29.2117, 142.5, 436.25, 1524)
femmesh.addNode(1.28543, 142.5, 400.628, 1525)
femmesh.addNode(1.4938, 142.5, 425.323, 1526)
femmesh.addNode(1.62033, 142.5, 450.095, 1527)
femmesh.addNode(29.2499, 142.5, 486.226, 1528)
femmesh.addNode(1.66847, 142.5, 475.013, 1529)
femmesh.addNode(-26.3059, 142.5, 414.473, 1530)
femmesh.addNode(-55.2062, 142.5, 404.509, 1531)
femmesh.addNode(-26.0494, 142.5, 464.087, 1532)
femmesh.addNode(-54.9978, 142.5, 429.204, 1533)
femmesh.addNode(-54.9571, 142.5, 454.11, 1534)
femmesh.addNode(29.0243, 142.5, 536.519, 1535)
femmesh.addNode(1.58012, 142.5, 500.218, 1536)
femmesh.addNode(1.54736, 142.5, 525.264, 1537)
femmesh.addNode(1.4101, 142.5, 550.352, 1538)
femmesh.addNode(56.6928, 142.5, 571.757, 1539)
femmesh.addNode(29.1104, 142.5, 586.339, 1540)
femmesh.addNode(1.43386, 142.5, 575.302, 1541)
femmesh.addNode(-26.0341, 142.5, 514.051, 1542)
femmesh.addNode(-54.9089, 142.5, 479.029, 1543)
femmesh.addNode(-54.902, 142.5, 504.012, 1544)
femmesh.addNode(-26.0431, 142.5, 564.047, 1545)
femmesh.addNode(-54.9347, 142.5, 529.058, 1546)
femmesh.addNode(-54.9336, 142.5, 554.052, 1547)
femmesh.addNode(84.3983, 142.5, 607.013, 1548)
femmesh.addNode(56.9161, 142.5, 596.49, 1549)
femmesh.addNode(113.499, 142.5, 565.954, 1550)
femmesh.addNode(113.499, 142.5, 590.954, 1551)
femmesh.addNode(84.2927, 142.5, 657.078, 1552)
femmesh.addNode(56.8158, 142.5, 621.595, 1553)
femmesh.addNode(56.857, 142.5, 646.506, 1554)
femmesh.addNode(113.399, 142.5, 616.059, 1555)
femmesh.addNode(113.399, 142.5, 641.059, 1556)
femmesh.addNode(84.2111, 142.5, 707.345, 1557)
femmesh.addNode(56.8518, 142.5, 671.466, 1558)
femmesh.addNode(56.734, 142.5, 696.719, 1559)
femmesh.addNode(113.394, 142.5, 666.019, 1560)
femmesh.addNode(113.394, 142.5, 691.019, 1561)
femmesh.addNode(56.6577, 142.5, 722.026, 1562)
femmesh.addNode(56.4217, 142.5, 747.276, 1563)
femmesh.addNode(84.3018, 142.5, 757.304, 1564)
femmesh.addNode(113.317, 142.5, 716.326, 1565)
femmesh.addNode(113.317, 142.5, 741.326, 1566)
femmesh.addNode(29.3749, 142.5, 635.983, 1567)
femmesh.addNode(1.65719, 142.5, 600.035, 1568)
femmesh.addNode(1.71503, 142.5, 624.943, 1569)
femmesh.addNode(1.75623, 142.5, 649.854, 1570)
femmesh.addNode(29.2983, 142.5, 686.147, 1571)
femmesh.addNode(1.73983, 142.5, 674.89, 1572)
femmesh.addNode(-25.9615, 142.5, 613.906, 1573)
femmesh.addNode(-54.9098, 142.5, 579.003, 1574)
femmesh.addNode(-54.8924, 142.5, 603.971, 1575)
femmesh.addNode(-25.92, 142.5, 663.85, 1576)
femmesh.addNode(-54.8345, 142.5, 628.879, 1577)
femmesh.addNode(-54.8259, 142.5, 653.866, 1578)
femmesh.addNode(28.9447, 142.5, 736.649, 1579)
femmesh.addNode(1.62209, 142.5, 700.143, 1580)
femmesh.addNode(1.44731, 142.5, 725.531, 1581)
femmesh.addNode(1.2114, 142.5, 750.781, 1582)
femmesh.addNode(56.5888, 142.5, 771.928, 1583)
femmesh.addNode(28.8971, 142.5, 786.689, 1584)
femmesh.addNode(1.09769, 142.5, 775.84, 1585)
femmesh.addNode(-26.1112, 142.5, 714.274, 1586)
femmesh.addNode(-54.8423, 142.5, 678.902, 1587)
femmesh.addNode(-54.8888, 142.5, 704.103, 1588)
femmesh.addNode(-55.0636, 142.5, 729.491, 1589)
femmesh.addNode(-55.0584, 142.5, 754.58, 1590)
femmesh.addNode(-26.3997, 142.5, 764.721, 1591)
femmesh.addNode(-83.8573, 142.5, 444.146, 1592)
femmesh.addNode(-113.199, 142.5, 409.62, 1593)
femmesh.addNode(-113.199, 142.5, 434.62, 1594)
femmesh.addNode(-83.8096, 142.5, 494.035, 1595)
femmesh.addNode(-113.158, 142.5, 484.526, 1596)
femmesh.addNode(-113.158, 142.5, 459.526, 1597)
femmesh.addNode(-83.8015, 142.5, 544.014, 1598)
femmesh.addNode(-113.151, 142.5, 534.51, 1599)
femmesh.addNode(-113.151, 142.5, 509.51, 1600)
femmesh.addNode(-83.7829, 142.5, 593.976, 1601)
femmesh.addNode(-113.15, 142.5, 559.504, 1602)
femmesh.addNode(-113.15, 142.5, 584.504, 1603)
femmesh.addNode(-83.7569, 142.5, 643.931, 1604)
femmesh.addNode(-113.133, 142.5, 609.472, 1605)
femmesh.addNode(-113.133, 142.5, 634.472, 1606)
femmesh.addNode(-83.7947, 142.5, 694.119, 1607)
femmesh.addNode(-113.124, 142.5, 659.459, 1608)
femmesh.addNode(-113.124, 142.5, 684.459, 1609)
femmesh.addNode(-83.836, 142.5, 744.409, 1610)
femmesh.addNode(-113.171, 142.5, 709.66, 1611)
femmesh.addNode(-113.171, 142.5, 734.66, 1612)
femmesh.addNode(-55.1721, 142.5, 779.639, 1613)
femmesh.addNode(-83.8677, 142.5, 794.387, 1614)
femmesh.addNode(-113.165, 142.5, 759.749, 1615)
femmesh.addNode(-113.165, 142.5, 784.749, 1616)
femmesh.addNode(84.3809, 142.5, 807.042, 1617)
femmesh.addNode(56.7772, 142.5, 796.718, 1618)
femmesh.addNode(113.484, 142.5, 765.978, 1619)
femmesh.addNode(113.484, 142.5, 790.978, 1620)
femmesh.addNode(84.0997, 142.5, 858.051, 1621)
femmesh.addNode(56.6892, 142.5, 821.803, 1622)
femmesh.addNode(56.7523, 142.5, 846.897, 1623)
femmesh.addNode(113.396, 142.5, 816.063, 1624)
femmesh.addNode(113.396, 142.5, 841.063, 1625)
femmesh.addNode(82.2539, 142.5, 914.162, 1626)
femmesh.addNode(56.559, 142.5, 872.821, 1627)
femmesh.addNode(56.1791, 142.5, 899.218, 1628)
femmesh.addNode(113.203, 142.5, 866.988, 1629)
femmesh.addNode(113.203, 142.5, 891.988, 1630)
femmesh.addNode(54.5266, 142.5, 929.404, 1631)
femmesh.addNode(53.525, 142.5, 952.773, 1632)
femmesh.addNode(111.551, 142.5, 922.174, 1633)
femmesh.addNode(111.551, 142.5, 947.174, 1634)
femmesh.addNode(29.1485, 142.5, 836.573, 1635)
femmesh.addNode(1.28606, 142.5, 800.63, 1636)
femmesh.addNode(1.47824, 142.5, 825.404, 1637)
femmesh.addNode(1.54127, 142.5, 850.498, 1638)
femmesh.addNode(28.8317, 142.5, 888.064, 1639)
femmesh.addNode(1.55582, 142.5, 876.507, 1640)
femmesh.addNode(-26.3212, 142.5, 814.555, 1641)
femmesh.addNode(-55.2089, 142.5, 804.528, 1642)
femmesh.addNode(-26.1145, 142.5, 865.339, 1643)
femmesh.addNode(-55.0167, 142.5, 829.303, 1644)
femmesh.addNode(-54.9815, 142.5, 854.527, 1645)
femmesh.addNode(27.4503, 142.5, 937.829, 1646)
femmesh.addNode(1.17596, 142.5, 902.904, 1647)
femmesh.addNode(1.45404, 142.5, 932.124, 1648)
femmesh.addNode(0.452457, 142.5, 955.493, 1649)
femmesh.addNode(-25.8218, 142.5, 920.568, 1650)
femmesh.addNode(-54.967, 142.5, 880.536, 1651)
femmesh.addNode(-54.6094, 142.5, 906.005, 1652)
femmesh.addNode(-54.3314, 142.5, 935.226, 1653)
femmesh.addNode(-52.1382, 142.5, 956.939, 1654)
femmesh.addNode(-83.8692, 142.5, 844.501, 1655)
femmesh.addNode(-113.202, 142.5, 809.638, 1656)
femmesh.addNode(-113.202, 142.5, 834.638, 1657)
femmesh.addNode(-83.4765, 142.5, 895.194, 1658)
femmesh.addNode(-113.167, 142.5, 859.862, 1659)
femmesh.addNode(-113.167, 142.5, 884.862, 1660)
femmesh.addNode(-80.9258, 142.5, 942.377, 1661)
femmesh.addNode(-112.809, 142.5, 910.331, 1662)
femmesh.addNode(-112.809, 142.5, 935.331, 1663)
femmesh.addNode(-110.616, 142.5, 957.045, 1664)
return True
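

# Usage sketch (an assumption, mirroring how FreeCAD's generated mesh modules
# are typically consumed; create_nodes(femmesh) is expected to be defined
# above in this file, as the addNode data preceding this point suggests):
#
#     import Fem
#     femmesh = Fem.FemMesh()
#     create_nodes(femmesh)
#     create_elements(femmesh)
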
def create_elements(femmesh):
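    """Populate the given femmesh with the surface elements (faces) listed below."""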
    # elements: each addFace call registers one face from six node ids
    # (apparently Tria6 quadratic triangles: three corner nodes followed by
    # three mid-edge nodes) plus an explicit face id as the second argument
femmesh.addFace([125, 112, 49, 724, 565, 566], 1)
femmesh.addFace([125, 119, 112, 742, 723, 724], 2)
femmesh.addFace([125, 120, 119, 744, 741, 742], 3)
femmesh.addFace([125, 126, 120, 755, 745, 744], 4)
femmesh.addFace([125, 50, 126, 568, 569, 755], 5)
femmesh.addFace([125, 49, 50, 566, 564, 568], 6)
femmesh.addFace([120, 117, 119, 737, 736, 741], 7)
femmesh.addFace([120, 118, 117, 738, 735, 737], 8)
femmesh.addFace([120, 123, 118, 743, 740, 738], 9)
femmesh.addFace([120, 126, 123, 745, 751, 743], 10)
femmesh.addFace([115, 121, 116, 732, 733, 730], 11)
femmesh.addFace([115, 118, 121, 731, 739, 732], 12)
femmesh.addFace([115, 114, 118, 727, 729, 731], 13)
femmesh.addFace([115, 26, 114, 501, 500, 727], 14)
femmesh.addFace([115, 27, 26, 503, 499, 501], 15)
femmesh.addFace([115, 116, 27, 730, 504, 503], 16)
femmesh.addFace([110, 109, 46, 720, 556, 557], 17)
femmesh.addFace([110, 100, 109, 702, 701, 720], 18)
femmesh.addFace([110, 103, 100, 711, 700, 702], 19)
femmesh.addFace([110, 111, 103, 721, 712, 711], 20)
femmesh.addFace([110, 47, 111, 559, 560, 721], 21)
femmesh.addFace([110, 46, 47, 557, 555, 559], 22)
femmesh.addFace([105, 80, 41, 644, 541, 542], 23)
femmesh.addFace([105, 87, 80, 663, 643, 644], 24)
femmesh.addFace([105, 88, 87, 665, 662, 663], 25)
femmesh.addFace([105, 106, 88, 716, 666, 665], 26)
femmesh.addFace([105, 42, 106, 544, 545, 716], 27)
femmesh.addFace([105, 41, 42, 542, 540, 544], 28)
femmesh.addFace([100, 99, 109, 697, 699, 701], 29)
femmesh.addFace([100, 97, 99, 693, 692, 697], 30)
femmesh.addFace([100, 98, 97, 694, 691, 693], 31)
femmesh.addFace([100, 103, 98, 700, 696, 694], 32)
femmesh.addFace([95, 101, 96, 687, 688, 685], 33)
femmesh.addFace([95, 98, 101, 686, 695, 687], 34)
femmesh.addFace([95, 94, 98, 682, 684, 686], 35)
femmesh.addFace([95, 22, 94, 489, 488, 682], 36)
femmesh.addFace([95, 23, 22, 491, 487, 489], 37)
femmesh.addFace([95, 96, 23, 685, 492, 491], 38)
femmesh.addFace([90, 89, 92, 667, 669, 670], 39)
femmesh.addFace([90, 84, 89, 654, 653, 667], 40)
femmesh.addFace([90, 93, 84, 671, 655, 654], 41)
femmesh.addFace([90, 97, 93, 672, 681, 671], 42)
femmesh.addFace([90, 99, 97, 673, 692, 672], 43)
femmesh.addFace([90, 92, 99, 670, 677, 673], 44)
femmesh.addFace([85, 81, 71, 646, 628, 629], 45)
femmesh.addFace([85, 82, 81, 648, 645, 646], 46)
femmesh.addFace([85, 86, 82, 656, 649, 648], 47)
femmesh.addFace([85, 88, 86, 658, 659, 656], 48)
femmesh.addFace([85, 87, 88, 657, 662, 658], 49)
femmesh.addFace([85, 71, 87, 629, 630, 657], 50)
femmesh.addFace([80, 87, 73, 643, 636, 635], 51)
femmesh.addFace([80, 40, 41, 539, 537, 541], 52)
femmesh.addFace([80, 79, 40, 642, 538, 539], 53)
femmesh.addFace([80, 73, 79, 635, 634, 642], 54)
femmesh.addFace([75, 74, 35, 637, 523, 524], 55)
femmesh.addFace([75, 60, 74, 597, 596, 637], 56)
femmesh.addFace([75, 61, 60, 599, 595, 597], 57)
femmesh.addFace([75, 76, 61, 638, 600, 599], 58)
femmesh.addFace([75, 36, 76, 526, 527, 638], 59)
femmesh.addFace([75, 35, 36, 524, 522, 526], 60)
femmesh.addFace([70, 67, 72, 616, 617, 625], 61)
femmesh.addFace([70, 64, 67, 608, 607, 616], 62)
femmesh.addFace([70, 65, 64, 609, 606, 608], 63)
femmesh.addFace([70, 71, 65, 624, 610, 609], 64)
femmesh.addFace([70, 73, 71, 626, 627, 624], 65)
femmesh.addFace([70, 72, 73, 625, 631, 626], 66)
femmesh.addFace([65, 15, 64, 468, 467, 606], 67)
femmesh.addFace([65, 16, 15, 470, 466, 468], 68)
femmesh.addFace([65, 81, 16, 611, 471, 470], 69)
femmesh.addFace([65, 71, 81, 610, 628, 611], 70)
femmesh.addFace([60, 57, 74, 586, 587, 596], 71)
femmesh.addFace([60, 56, 57, 585, 583, 586], 72)
femmesh.addFace([60, 58, 56, 589, 584, 585], 73)
femmesh.addFace([60, 61, 58, 595, 590, 589], 74)
femmesh.addFace([55, 59, 62, 581, 592, 582], 75)
femmesh.addFace([55, 58, 59, 580, 588, 581], 76)
femmesh.addFace([55, 54, 58, 577, 579, 580], 77)
femmesh.addFace([55, 11, 54, 456, 455, 577], 78)
femmesh.addFace([55, 12, 11, 458, 454, 456], 79)
femmesh.addFace([55, 62, 12, 582, 459, 458], 80)
femmesh.addFace([50, 51, 126, 567, 571, 569], 81)
femmesh.addFace([45, 108, 44, 553, 551, 549], 82)
femmesh.addFace([45, 109, 108, 554, 719, 553], 83)
femmesh.addFace([45, 46, 109, 552, 556, 554], 84)
femmesh.addFace([40, 79, 39, 538, 536, 534], 85)
femmesh.addFace([35, 74, 34, 523, 521, 520], 86)
femmesh.addFace([30, 122, 31, 511, 513, 509], 87)
femmesh.addFace([30, 116, 122, 510, 734, 511], 88)
femmesh.addFace([30, 29, 116, 507, 508, 510], 89)
femmesh.addFace([25, 24, 113, 493, 495, 497], 90)
femmesh.addFace([25, 114, 26, 498, 500, 496], 91)
femmesh.addFace([25, 113, 114, 497, 725, 498], 92)
femmesh.addFace([20, 19, 84, 478, 480, 482], 93)
femmesh.addFace([20, 93, 21, 483, 485, 481], 94)
femmesh.addFace([20, 84, 93, 482, 655, 483], 95)
femmesh.addFace([15, 14, 64, 463, 465, 467], 96)
femmesh.addFace([10, 54, 11, 453, 455, 451], 97)
femmesh.addFace([10, 53, 54, 452, 575, 453], 98)
femmesh.addFace([10, 5, 53, 438, 439, 452], 99)
femmesh.addFace([10, 1, 5, 430, 429, 438], 100)
femmesh.addFace([5, 6, 53, 437, 441, 439], 101)
femmesh.addFace([124, 122, 121, 749, 746, 748], 102)
femmesh.addFace([124, 31, 122, 514, 513, 749], 103)
femmesh.addFace([124, 32, 31, 516, 512, 514], 104)
femmesh.addFace([124, 128, 32, 754, 517, 516], 105)
femmesh.addFace([124, 127, 128, 753, 757, 754], 106)
femmesh.addFace([124, 123, 127, 750, 752, 753], 107)
femmesh.addFace([124, 121, 123, 748, 747, 750], 108)
femmesh.addFace([119, 104, 112, 715, 714, 723], 109)
femmesh.addFace([119, 102, 104, 709, 706, 715], 110)
femmesh.addFace([119, 117, 102, 736, 708, 709], 111)
femmesh.addFace([114, 117, 118, 728, 735, 729], 112)
femmesh.addFace([114, 113, 117, 725, 726, 728], 113)
femmesh.addFace([109, 99, 108, 699, 698, 719], 114)
femmesh.addFace([104, 111, 112, 713, 722, 714], 115)
femmesh.addFace([104, 103, 111, 710, 712, 713], 116)
femmesh.addFace([104, 101, 103, 705, 704, 710], 117)
femmesh.addFace([104, 102, 101, 706, 703, 705], 118)
femmesh.addFace([99, 92, 108, 677, 679, 698], 119)
femmesh.addFace([94, 22, 21, 488, 484, 486], 120)
femmesh.addFace([94, 97, 98, 683, 691, 684], 121)
femmesh.addFace([94, 93, 97, 680, 681, 683], 122)
femmesh.addFace([94, 21, 93, 486, 485, 680], 123)
femmesh.addFace([89, 91, 92, 668, 674, 669], 124)
femmesh.addFace([89, 86, 91, 660, 661, 668], 125)
femmesh.addFace([89, 83, 86, 652, 651, 660], 126)
femmesh.addFace([89, 84, 83, 653, 650, 652], 127)
femmesh.addFace([84, 19, 83, 480, 479, 650], 128)
femmesh.addFace([79, 73, 72, 634, 631, 633], 129)
femmesh.addFace([79, 78, 39, 641, 535, 536], 130)
femmesh.addFace([79, 72, 78, 633, 632, 641], 131)
femmesh.addFace([74, 9, 34, 450, 449, 521], 132)
femmesh.addFace([74, 8, 9, 448, 446, 450], 133)
femmesh.addFace([74, 57, 8, 587, 447, 448], 134)
femmesh.addFace([69, 78, 72, 623, 632, 621], 135)
femmesh.addFace([69, 77, 78, 622, 640, 623], 136)
femmesh.addFace([69, 68, 77, 618, 620, 622], 137)
femmesh.addFace([69, 66, 68, 614, 613, 618], 138)
femmesh.addFace([69, 67, 66, 615, 612, 614], 139)
femmesh.addFace([69, 72, 67, 621, 617, 615], 140)
femmesh.addFace([64, 63, 67, 603, 605, 607], 141)
femmesh.addFace([64, 14, 63, 465, 464, 603], 142)
femmesh.addFace([59, 58, 61, 588, 590, 591], 143)
femmesh.addFace([59, 66, 62, 593, 602, 592], 144)
femmesh.addFace([59, 68, 66, 594, 613, 593], 145)
femmesh.addFace([59, 61, 68, 591, 598, 594], 146)
femmesh.addFace([54, 56, 58, 578, 584, 579], 147)
femmesh.addFace([54, 53, 56, 575, 576, 578], 148)
femmesh.addFace([49, 112, 48, 565, 563, 561], 149)
femmesh.addFace([44, 107, 43, 550, 548, 546], 150)
femmesh.addFace([44, 108, 107, 551, 718, 550], 151)
femmesh.addFace([39, 78, 38, 535, 533, 531], 152)
femmesh.addFace([34, 9, 2, 449, 431, 432], 153)
femmesh.addFace([29, 28, 116, 505, 506, 508], 154)
femmesh.addFace([29, 3, 28, 434, 433, 505], 155)
femmesh.addFace([24, 96, 113, 494, 690, 495], 156)
femmesh.addFace([24, 23, 96, 490, 492, 494], 157)
femmesh.addFace([19, 18, 83, 475, 477, 479], 158)
femmesh.addFace([14, 13, 63, 460, 462, 464], 159)
femmesh.addFace([4, 33, 52, 435, 518, 436], 160)
femmesh.addFace([128, 33, 32, 519, 515, 517], 161)
femmesh.addFace([128, 52, 33, 574, 518, 519], 162)
femmesh.addFace([128, 127, 52, 757, 573, 574], 163)
femmesh.addFace([123, 126, 127, 751, 756, 752], 164)
femmesh.addFace([123, 121, 118, 747, 739, 740], 165)
femmesh.addFace([113, 102, 117, 707, 708, 726], 166)
femmesh.addFace([113, 96, 102, 690, 689, 707], 167)
femmesh.addFace([108, 92, 107, 679, 678, 718], 168)
femmesh.addFace([103, 101, 98, 704, 695, 696], 169)
femmesh.addFace([88, 91, 86, 664, 661, 659], 170)
femmesh.addFace([88, 106, 91, 666, 675, 664], 171)
femmesh.addFace([83, 82, 86, 647, 649, 651], 172)
femmesh.addFace([83, 18, 82, 477, 476, 647], 173)
femmesh.addFace([78, 77, 38, 640, 532, 533], 174)
femmesh.addFace([73, 87, 71, 636, 630, 627], 175)
femmesh.addFace([68, 76, 77, 619, 639, 620], 176)
femmesh.addFace([68, 61, 76, 598, 600, 619], 177)
femmesh.addFace([63, 66, 67, 604, 612, 605], 178)
femmesh.addFace([63, 62, 66, 601, 602, 604], 179)
femmesh.addFace([63, 13, 62, 462, 461, 601], 180)
femmesh.addFace([53, 6, 56, 441, 442, 576], 181)
femmesh.addFace([48, 111, 47, 562, 560, 558], 182)
femmesh.addFace([48, 112, 111, 563, 722, 562], 183)
femmesh.addFace([43, 106, 42, 547, 545, 543], 184)
femmesh.addFace([43, 107, 106, 548, 717, 547], 185)
femmesh.addFace([38, 77, 37, 532, 530, 528], 186)
femmesh.addFace([28, 27, 116, 502, 504, 506], 187)
femmesh.addFace([18, 17, 82, 472, 474, 476], 188)
femmesh.addFace([13, 12, 62, 457, 459, 461], 189)
femmesh.addFace([8, 57, 7, 447, 445, 443], 190)
femmesh.addFace([127, 51, 52, 572, 570, 573], 191)
femmesh.addFace([127, 126, 51, 756, 571, 572], 192)
femmesh.addFace([122, 116, 121, 734, 733, 746], 193)
femmesh.addFace([107, 91, 106, 676, 675, 717], 194)
femmesh.addFace([107, 92, 91, 678, 674, 676], 195)
femmesh.addFace([102, 96, 101, 689, 688, 703], 196)
femmesh.addFace([82, 17, 81, 474, 473, 645], 197)
femmesh.addFace([77, 76, 37, 639, 529, 530], 198)
femmesh.addFace([57, 56, 7, 583, 444, 445], 199)
femmesh.addFace([37, 76, 36, 529, 527, 525], 200)
femmesh.addFace([17, 16, 81, 469, 471, 473], 201)
femmesh.addFace([7, 56, 6, 444, 442, 440], 202)
femmesh.addFace([232, 219, 156, 1025, 837, 838], 203)
femmesh.addFace([232, 226, 219, 1051, 1024, 1025], 204)
femmesh.addFace([232, 227, 226, 1053, 1050, 1051], 205)
femmesh.addFace([232, 233, 227, 1064, 1054, 1053], 206)
femmesh.addFace([232, 157, 233, 840, 841, 1064], 207)
femmesh.addFace([232, 156, 157, 838, 836, 840], 208)
femmesh.addFace([227, 224, 226, 1046, 1045, 1050], 209)
femmesh.addFace([227, 225, 224, 1047, 1044, 1046], 210)
femmesh.addFace([227, 230, 225, 1052, 1049, 1047], 211)
femmesh.addFace([227, 233, 230, 1054, 1060, 1052], 212)
femmesh.addFace([222, 228, 223, 1037, 1040, 1035], 213)
femmesh.addFace([222, 225, 228, 1036, 1048, 1037], 214)
femmesh.addFace([222, 221, 225, 1030, 1032, 1036], 215)
femmesh.addFace([222, 50, 221, 1038, 1034, 1030], 216)
femmesh.addFace([222, 51, 50, 1039, 567, 1038], 217)
femmesh.addFace([222, 223, 51, 1035, 1042, 1039], 218)
femmesh.addFace([217, 216, 153, 1021, 828, 829], 219)
femmesh.addFace([217, 207, 216, 1003, 1002, 1021], 220)
femmesh.addFace([217, 210, 207, 1012, 1001, 1003], 221)
femmesh.addFace([217, 218, 210, 1022, 1013, 1012], 222)
femmesh.addFace([217, 154, 218, 831, 832, 1022], 223)
femmesh.addFace([217, 153, 154, 829, 827, 831], 224)
femmesh.addFace([212, 187, 148, 929, 813, 814], 225)
femmesh.addFace([212, 194, 187, 956, 928, 929], 226)
femmesh.addFace([212, 195, 194, 958, 955, 956], 227)
femmesh.addFace([212, 213, 195, 1017, 959, 958], 228)
femmesh.addFace([212, 149, 213, 816, 817, 1017], 229)
femmesh.addFace([212, 148, 149, 814, 812, 816], 230)
femmesh.addFace([207, 206, 216, 998, 1000, 1002], 231)
femmesh.addFace([207, 204, 206, 994, 993, 998], 232)
femmesh.addFace([207, 205, 204, 995, 992, 994], 233)
femmesh.addFace([207, 210, 205, 1001, 997, 995], 234)
femmesh.addFace([202, 208, 203, 984, 987, 982], 235)
femmesh.addFace([202, 205, 208, 983, 996, 984], 236)
femmesh.addFace([202, 201, 205, 977, 979, 983], 237)
femmesh.addFace([202, 46, 201, 985, 981, 977], 238)
femmesh.addFace([202, 47, 46, 986, 555, 985], 239)
femmesh.addFace([202, 203, 47, 982, 990, 986], 240)
femmesh.addFace([197, 196, 199, 960, 962, 963], 241)
femmesh.addFace([197, 191, 196, 945, 944, 960], 242)
femmesh.addFace([197, 200, 191, 964, 946, 945], 243)
femmesh.addFace([197, 204, 200, 965, 974, 964], 244)
femmesh.addFace([197, 206, 204, 966, 993, 965], 245)
femmesh.addFace([197, 199, 206, 963, 970, 966], 246)
femmesh.addFace([192, 188, 178, 931, 913, 914], 247)
femmesh.addFace([192, 189, 188, 935, 930, 931], 248)
femmesh.addFace([192, 193, 189, 949, 936, 935], 249)
femmesh.addFace([192, 195, 193, 951, 952, 949], 250)
femmesh.addFace([192, 194, 195, 950, 955, 951], 251)
femmesh.addFace([192, 178, 194, 914, 915, 950], 252)
femmesh.addFace([187, 194, 180, 928, 921, 920], 253)
femmesh.addFace([187, 147, 148, 811, 809, 813], 254)
femmesh.addFace([187, 186, 147, 927, 810, 811], 255)
femmesh.addFace([187, 180, 186, 920, 919, 927], 256)
femmesh.addFace([182, 181, 142, 922, 795, 796], 257)
femmesh.addFace([182, 167, 181, 874, 873, 922], 258)
femmesh.addFace([182, 168, 167, 876, 872, 874], 259)
femmesh.addFace([182, 183, 168, 923, 877, 876], 260)
femmesh.addFace([182, 143, 183, 798, 799, 923], 261)
femmesh.addFace([182, 142, 143, 796, 794, 798], 262)
femmesh.addFace([177, 174, 179, 901, 902, 910], 263)
femmesh.addFace([177, 171, 174, 889, 888, 901], 264)
femmesh.addFace([177, 172, 171, 892, 887, 889], 265)
femmesh.addFace([177, 178, 172, 909, 893, 892], 266)
femmesh.addFace([177, 180, 178, 911, 912, 909], 267)
femmesh.addFace([177, 179, 180, 910, 916, 911], 268)
femmesh.addFace([172, 39, 171, 895, 891, 887], 269)
femmesh.addFace([172, 40, 39, 896, 534, 895], 270)
femmesh.addFace([172, 188, 40, 894, 932, 896], 271)
femmesh.addFace([172, 178, 188, 893, 913, 894], 272)
femmesh.addFace([167, 164, 181, 863, 864, 873], 273)
femmesh.addFace([167, 163, 164, 862, 860, 863], 274)
femmesh.addFace([167, 165, 163, 866, 861, 862], 275)
femmesh.addFace([167, 168, 165, 872, 867, 866], 276)
femmesh.addFace([162, 166, 169, 856, 869, 857], 277)
femmesh.addFace([162, 165, 166, 855, 865, 856], 278)
femmesh.addFace([162, 161, 165, 850, 852, 855], 279)
femmesh.addFace([162, 35, 161, 858, 854, 850], 280)
femmesh.addFace([162, 36, 35, 859, 522, 858], 281)
femmesh.addFace([162, 169, 36, 857, 880, 859], 282)
femmesh.addFace([157, 158, 233, 839, 843, 841], 283)
femmesh.addFace([152, 215, 151, 825, 823, 821], 284)
femmesh.addFace([152, 216, 215, 826, 1020, 825], 285)
femmesh.addFace([152, 153, 216, 824, 828, 826], 286)
femmesh.addFace([147, 186, 146, 810, 808, 806], 287)
femmesh.addFace([142, 181, 141, 795, 793, 792], 288)
femmesh.addFace([137, 229, 138, 783, 785, 781], 289)
femmesh.addFace([137, 223, 229, 782, 1041, 783], 290)
femmesh.addFace([137, 136, 223, 777, 778, 782], 291)
femmesh.addFace([132, 160, 131, 767, 763, 762], 292)
femmesh.addFace([132, 163, 160, 768, 848, 767], 293)
femmesh.addFace([132, 133, 163, 766, 770, 768], 294)
femmesh.addFace([49, 48, 220, 561, 1028, 1029], 295)
femmesh.addFace([49, 221, 50, 1033, 1034, 564], 296)
femmesh.addFace([49, 220, 221, 1029, 1026, 1033], 297)
femmesh.addFace([44, 43, 191, 546, 948, 947], 298)
femmesh.addFace([44, 200, 45, 975, 976, 549], 299)
femmesh.addFace([44, 191, 200, 947, 946, 975], 300)
femmesh.addFace([39, 38, 171, 531, 890, 891], 301)
femmesh.addFace([34, 161, 35, 853, 854, 520], 302)
femmesh.addFace([34, 160, 161, 849, 847, 853], 303)
femmesh.addFace([34, 131, 160, 765, 763, 849], 304)
femmesh.addFace([34, 2, 131, 432, 764, 765], 305)
femmesh.addFace([231, 229, 228, 1058, 1055, 1057], 306)
femmesh.addFace([231, 138, 229, 786, 785, 1058], 307)
femmesh.addFace([231, 139, 138, 788, 784, 786], 308)
femmesh.addFace([231, 235, 139, 1063, 789, 788], 309)
femmesh.addFace([231, 234, 235, 1062, 1066, 1063], 310)
femmesh.addFace([231, 230, 234, 1059, 1061, 1062], 311)
femmesh.addFace([231, 228, 230, 1057, 1056, 1059], 312)
femmesh.addFace([226, 211, 219, 1016, 1015, 1024], 313)
femmesh.addFace([226, 209, 211, 1010, 1007, 1016], 314)
femmesh.addFace([226, 224, 209, 1045, 1009, 1010], 315)
femmesh.addFace([221, 224, 225, 1031, 1044, 1032], 316)
femmesh.addFace([221, 220, 224, 1026, 1027, 1031], 317)
femmesh.addFace([216, 206, 215, 1000, 999, 1020], 318)
femmesh.addFace([211, 218, 219, 1014, 1023, 1015], 319)
femmesh.addFace([211, 210, 218, 1011, 1013, 1014], 320)
femmesh.addFace([211, 208, 210, 1006, 1005, 1011], 321)
femmesh.addFace([211, 209, 208, 1007, 1004, 1006], 322)
femmesh.addFace([206, 199, 215, 970, 972, 999], 323)
femmesh.addFace([201, 46, 45, 981, 552, 980], 324)
femmesh.addFace([201, 204, 205, 978, 992, 979], 325)
femmesh.addFace([201, 200, 204, 973, 974, 978], 326)
femmesh.addFace([201, 45, 200, 980, 976, 973], 327)
femmesh.addFace([196, 198, 199, 961, 967, 962], 328)
femmesh.addFace([196, 193, 198, 953, 954, 961], 329)
femmesh.addFace([196, 190, 193, 941, 940, 953], 330)
femmesh.addFace([196, 191, 190, 944, 939, 941], 331)
femmesh.addFace([191, 43, 190, 948, 942, 939], 332)
femmesh.addFace([186, 180, 179, 919, 916, 918], 333)
femmesh.addFace([186, 185, 146, 926, 807, 808], 334)
femmesh.addFace([186, 179, 185, 918, 917, 926], 335)
femmesh.addFace([181, 135, 141, 776, 775, 793], 336)
femmesh.addFace([181, 134, 135, 774, 772, 776], 337)
femmesh.addFace([181, 164, 134, 864, 773, 774], 338)
femmesh.addFace([176, 185, 179, 908, 917, 906], 339)
femmesh.addFace([176, 184, 185, 907, 925, 908], 340)
femmesh.addFace([176, 175, 184, 903, 905, 907], 341)
femmesh.addFace([176, 173, 175, 899, 898, 903], 342)
femmesh.addFace([176, 174, 173, 900, 897, 899], 343)
femmesh.addFace([176, 179, 174, 906, 902, 900], 344)
femmesh.addFace([171, 170, 174, 882, 884, 888], 345)
femmesh.addFace([171, 38, 170, 890, 886, 882], 346)
femmesh.addFace([166, 165, 168, 865, 867, 868], 347)
femmesh.addFace([166, 173, 169, 870, 879, 869], 348)
femmesh.addFace([166, 175, 173, 871, 898, 870], 349)
femmesh.addFace([166, 168, 175, 868, 875, 871], 350)
femmesh.addFace([161, 163, 165, 851, 861, 852], 351)
femmesh.addFace([161, 160, 163, 847, 848, 851], 352)
femmesh.addFace([156, 219, 155, 837, 835, 833], 353)
femmesh.addFace([151, 214, 150, 822, 820, 818], 354)
femmesh.addFace([151, 215, 214, 823, 1019, 822], 355)
femmesh.addFace([146, 185, 145, 807, 805, 803], 356)
femmesh.addFace([141, 135, 129, 775, 758, 759], 357)
femmesh.addFace([136, 52, 223, 779, 1043, 778], 358)
femmesh.addFace([136, 4, 52, 780, 436, 779], 359)
femmesh.addFace([48, 203, 220, 991, 989, 1028], 360)
femmesh.addFace([48, 47, 203, 558, 990, 991], 361)
femmesh.addFace([43, 42, 190, 543, 943, 942], 362)
femmesh.addFace([38, 37, 170, 528, 885, 886], 363)
femmesh.addFace([130, 140, 159, 760, 790, 761], 364)
femmesh.addFace([235, 140, 139, 791, 787, 789], 365)
femmesh.addFace([235, 159, 140, 846, 790, 791], 366)
femmesh.addFace([235, 234, 159, 1066, 845, 846], 367)
femmesh.addFace([230, 233, 234, 1060, 1065, 1061], 368)
femmesh.addFace([230, 228, 225, 1056, 1048, 1049], 369)
femmesh.addFace([220, 209, 224, 1008, 1009, 1027], 370)
femmesh.addFace([220, 203, 209, 989, 988, 1008], 371)
femmesh.addFace([215, 199, 214, 972, 971, 1019], 372)
femmesh.addFace([210, 208, 205, 1005, 996, 997], 373)
femmesh.addFace([195, 198, 193, 957, 954, 952], 374)
femmesh.addFace([195, 213, 198, 959, 968, 957], 375)
femmesh.addFace([190, 189, 193, 934, 936, 940], 376)
femmesh.addFace([190, 42, 189, 943, 938, 934], 377)
femmesh.addFace([185, 184, 145, 925, 804, 805], 378)
femmesh.addFace([180, 194, 178, 921, 915, 912], 379)
femmesh.addFace([175, 183, 184, 904, 924, 905], 380)
femmesh.addFace([175, 168, 183, 875, 877, 904], 381)
femmesh.addFace([170, 173, 174, 883, 897, 884], 382)
femmesh.addFace([170, 169, 173, 878, 879, 883], 383)
femmesh.addFace([170, 37, 169, 885, 881, 878], 384)
femmesh.addFace([155, 218, 154, 834, 832, 830], 385)
femmesh.addFace([155, 219, 218, 835, 1023, 834], 386)
femmesh.addFace([150, 213, 149, 819, 817, 815], 387)
femmesh.addFace([150, 214, 213, 820, 1018, 819], 388)
femmesh.addFace([145, 184, 144, 804, 802, 800], 389)
femmesh.addFace([52, 51, 223, 570, 1042, 1043], 390)
femmesh.addFace([42, 41, 189, 540, 937, 938], 391)
femmesh.addFace([37, 36, 169, 525, 880, 881], 392)
femmesh.addFace([234, 158, 159, 844, 842, 845], 393)
femmesh.addFace([234, 233, 158, 1065, 843, 844], 394)
femmesh.addFace([229, 223, 228, 1041, 1040, 1055], 395)
femmesh.addFace([214, 198, 213, 969, 968, 1018], 396)
femmesh.addFace([214, 199, 198, 971, 967, 969], 397)
femmesh.addFace([209, 203, 208, 988, 987, 1004], 398)
femmesh.addFace([189, 41, 188, 937, 933, 930], 399)
femmesh.addFace([184, 183, 144, 924, 801, 802], 400)
femmesh.addFace([164, 133, 134, 771, 769, 773], 401)
femmesh.addFace([164, 163, 133, 860, 770, 771], 402)
femmesh.addFace([144, 183, 143, 801, 799, 797], 403)
femmesh.addFace([41, 40, 188, 537, 932, 933], 404)
femmesh.addFace([339, 326, 263, 1334, 1146, 1147], 405)
femmesh.addFace([339, 333, 326, 1360, 1333, 1334], 406)
femmesh.addFace([339, 334, 333, 1362, 1359, 1360], 407)
femmesh.addFace([339, 340, 334, 1373, 1363, 1362], 408)
femmesh.addFace([339, 264, 340, 1149, 1150, 1373], 409)
femmesh.addFace([339, 263, 264, 1147, 1145, 1149], 410)
femmesh.addFace([334, 331, 333, 1355, 1354, 1359], 411)
femmesh.addFace([334, 332, 331, 1356, 1353, 1355], 412)
femmesh.addFace([334, 337, 332, 1361, 1358, 1356], 413)
femmesh.addFace([334, 340, 337, 1363, 1369, 1361], 414)
femmesh.addFace([329, 335, 330, 1346, 1349, 1344], 415)
femmesh.addFace([329, 332, 335, 1345, 1357, 1346], 416)
femmesh.addFace([329, 328, 332, 1339, 1341, 1345], 417)
femmesh.addFace([329, 157, 328, 1347, 1343, 1339], 418)
femmesh.addFace([329, 158, 157, 1348, 839, 1347], 419)
femmesh.addFace([329, 330, 158, 1344, 1351, 1348], 420)
femmesh.addFace([324, 323, 260, 1330, 1137, 1138], 421)
femmesh.addFace([324, 314, 323, 1312, 1311, 1330], 422)
femmesh.addFace([324, 317, 314, 1321, 1310, 1312], 423)
femmesh.addFace([324, 325, 317, 1331, 1322, 1321], 424)
femmesh.addFace([324, 261, 325, 1140, 1141, 1331], 425)
femmesh.addFace([324, 260, 261, 1138, 1136, 1140], 426)
femmesh.addFace([319, 294, 255, 1238, 1122, 1123], 427)
femmesh.addFace([319, 301, 294, 1265, 1237, 1238], 428)
femmesh.addFace([319, 302, 301, 1267, 1264, 1265], 429)
femmesh.addFace([319, 320, 302, 1326, 1268, 1267], 430)
femmesh.addFace([319, 256, 320, 1125, 1126, 1326], 431)
femmesh.addFace([319, 255, 256, 1123, 1121, 1125], 432)
femmesh.addFace([314, 313, 323, 1307, 1309, 1311], 433)
femmesh.addFace([314, 311, 313, 1303, 1302, 1307], 434)
femmesh.addFace([314, 312, 311, 1304, 1301, 1303], 435)
femmesh.addFace([314, 317, 312, 1310, 1306, 1304], 436)
femmesh.addFace([309, 315, 310, 1293, 1296, 1291], 437)
femmesh.addFace([309, 312, 315, 1292, 1305, 1293], 438)
femmesh.addFace([309, 308, 312, 1286, 1288, 1292], 439)
femmesh.addFace([309, 153, 308, 1294, 1290, 1286], 440)
femmesh.addFace([309, 154, 153, 1295, 827, 1294], 441)
femmesh.addFace([309, 310, 154, 1291, 1299, 1295], 442)
femmesh.addFace([304, 303, 306, 1269, 1271, 1272], 443)
femmesh.addFace([304, 298, 303, 1254, 1253, 1269], 444)
femmesh.addFace([304, 307, 298, 1273, 1255, 1254], 445)
femmesh.addFace([304, 311, 307, 1274, 1283, 1273], 446)
femmesh.addFace([304, 313, 311, 1275, 1302, 1274], 447)
femmesh.addFace([304, 306, 313, 1272, 1279, 1275], 448)
femmesh.addFace([299, 295, 285, 1240, 1222, 1223], 449)
femmesh.addFace([299, 296, 295, 1244, 1239, 1240], 450)
femmesh.addFace([299, 300, 296, 1258, 1245, 1244], 451)
femmesh.addFace([299, 302, 300, 1260, 1261, 1258], 452)
femmesh.addFace([299, 301, 302, 1259, 1264, 1260], 453)
femmesh.addFace([299, 285, 301, 1223, 1224, 1259], 454)
femmesh.addFace([294, 301, 287, 1237, 1230, 1229], 455)
femmesh.addFace([294, 254, 255, 1120, 1118, 1122], 456)
femmesh.addFace([294, 293, 254, 1236, 1119, 1120], 457)
femmesh.addFace([294, 287, 293, 1229, 1228, 1236], 458)
femmesh.addFace([289, 288, 249, 1231, 1104, 1105], 459)
femmesh.addFace([289, 274, 288, 1183, 1182, 1231], 460)
femmesh.addFace([289, 275, 274, 1185, 1181, 1183], 461)
femmesh.addFace([289, 290, 275, 1232, 1186, 1185], 462)
femmesh.addFace([289, 250, 290, 1107, 1108, 1232], 463)
femmesh.addFace([289, 249, 250, 1105, 1103, 1107], 464)
femmesh.addFace([284, 281, 286, 1210, 1211, 1219], 465)
femmesh.addFace([284, 278, 281, 1198, 1197, 1210], 466)
femmesh.addFace([284, 279, 278, 1201, 1196, 1198], 467)
femmesh.addFace([284, 285, 279, 1218, 1202, 1201], 468)
femmesh.addFace([284, 287, 285, 1220, 1221, 1218], 469)
femmesh.addFace([284, 286, 287, 1219, 1225, 1220], 470)
femmesh.addFace([279, 146, 278, 1204, 1200, 1196], 471)
femmesh.addFace([279, 147, 146, 1205, 806, 1204], 472)
femmesh.addFace([279, 295, 147, 1203, 1241, 1205], 473)
femmesh.addFace([279, 285, 295, 1202, 1222, 1203], 474)
femmesh.addFace([274, 271, 288, 1172, 1173, 1182], 475)
femmesh.addFace([274, 270, 271, 1171, 1169, 1172], 476)
femmesh.addFace([274, 272, 270, 1175, 1170, 1171], 477)
femmesh.addFace([274, 275, 272, 1181, 1176, 1175], 478)
femmesh.addFace([269, 273, 276, 1165, 1178, 1166], 479)
femmesh.addFace([269, 272, 273, 1164, 1174, 1165], 480)
femmesh.addFace([269, 268, 272, 1159, 1161, 1164], 481)
femmesh.addFace([269, 142, 268, 1167, 1163, 1159], 482)
femmesh.addFace([269, 143, 142, 1168, 794, 1167], 483)
femmesh.addFace([269, 276, 143, 1166, 1189, 1168], 484)
femmesh.addFace([264, 265, 340, 1148, 1152, 1150], 485)
femmesh.addFace([259, 322, 258, 1134, 1132, 1130], 486)
femmesh.addFace([259, 323, 322, 1135, 1329, 1134], 487)
femmesh.addFace([259, 260, 323, 1133, 1137, 1135], 488)
femmesh.addFace([254, 293, 253, 1119, 1117, 1115], 489)
femmesh.addFace([249, 288, 248, 1104, 1102, 1101], 490)
femmesh.addFace([244, 336, 245, 1092, 1094, 1090], 491)
femmesh.addFace([244, 330, 336, 1091, 1350, 1092], 492)
femmesh.addFace([244, 243, 330, 1086, 1087, 1091], 493)
femmesh.addFace([239, 267, 238, 1076, 1072, 1071], 494)
femmesh.addFace([239, 270, 267, 1077, 1157, 1076], 495)
femmesh.addFace([239, 240, 270, 1075, 1079, 1077], 496)
femmesh.addFace([156, 155, 327, 833, 1337, 1338], 497)
femmesh.addFace([156, 328, 157, 1342, 1343, 836], 498)
femmesh.addFace([156, 327, 328, 1338, 1335, 1342], 499)
femmesh.addFace([151, 150, 298, 818, 1257, 1256], 500)
femmesh.addFace([151, 307, 152, 1284, 1285, 821], 501)
femmesh.addFace([151, 298, 307, 1256, 1255, 1284], 502)
femmesh.addFace([146, 145, 278, 803, 1199, 1200], 503)
femmesh.addFace([141, 268, 142, 1162, 1163, 792], 504)
femmesh.addFace([141, 267, 268, 1158, 1156, 1162], 505)
femmesh.addFace([141, 238, 267, 1074, 1072, 1158], 506)
femmesh.addFace([141, 129, 238, 759, 1073, 1074], 507)
femmesh.addFace([338, 336, 335, 1367, 1364, 1366], 508)
femmesh.addFace([338, 245, 336, 1095, 1094, 1367], 509)
femmesh.addFace([338, 246, 245, 1097, 1093, 1095], 510)
femmesh.addFace([338, 342, 246, 1372, 1098, 1097], 511)
femmesh.addFace([338, 341, 342, 1371, 1375, 1372], 512)
femmesh.addFace([338, 337, 341, 1368, 1370, 1371], 513)
femmesh.addFace([338, 335, 337, 1366, 1365, 1368], 514)
femmesh.addFace([333, 318, 326, 1325, 1324, 1333], 515)
femmesh.addFace([333, 316, 318, 1319, 1316, 1325], 516)
femmesh.addFace([333, 331, 316, 1354, 1318, 1319], 517)
femmesh.addFace([328, 331, 332, 1340, 1353, 1341], 518)
femmesh.addFace([328, 327, 331, 1335, 1336, 1340], 519)
femmesh.addFace([323, 313, 322, 1309, 1308, 1329], 520)
femmesh.addFace([318, 325, 326, 1323, 1332, 1324], 521)
femmesh.addFace([318, 317, 325, 1320, 1322, 1323], 522)
femmesh.addFace([318, 315, 317, 1315, 1314, 1320], 523)
femmesh.addFace([318, 316, 315, 1316, 1313, 1315], 524)
femmesh.addFace([313, 306, 322, 1279, 1281, 1308], 525)
femmesh.addFace([308, 153, 152, 1290, 824, 1289], 526)
femmesh.addFace([308, 311, 312, 1287, 1301, 1288], 527)
femmesh.addFace([308, 307, 311, 1282, 1283, 1287], 528)
femmesh.addFace([308, 152, 307, 1289, 1285, 1282], 529)
femmesh.addFace([303, 305, 306, 1270, 1276, 1271], 530)
femmesh.addFace([303, 300, 305, 1262, 1263, 1270], 531)
femmesh.addFace([303, 297, 300, 1250, 1249, 1262], 532)
femmesh.addFace([303, 298, 297, 1253, 1248, 1250], 533)
femmesh.addFace([298, 150, 297, 1257, 1251, 1248], 534)
femmesh.addFace([293, 287, 286, 1228, 1225, 1227], 535)
femmesh.addFace([293, 292, 253, 1235, 1116, 1117], 536)
femmesh.addFace([293, 286, 292, 1227, 1226, 1235], 537)
femmesh.addFace([288, 242, 248, 1085, 1084, 1102], 538)
femmesh.addFace([288, 241, 242, 1083, 1081, 1085], 539)
femmesh.addFace([288, 271, 241, 1173, 1082, 1083], 540)
femmesh.addFace([283, 292, 286, 1217, 1226, 1215], 541)
femmesh.addFace([283, 291, 292, 1216, 1234, 1217], 542)
femmesh.addFace([283, 282, 291, 1212, 1214, 1216], 543)
femmesh.addFace([283, 280, 282, 1208, 1207, 1212], 544)
femmesh.addFace([283, 281, 280, 1209, 1206, 1208], 545)
femmesh.addFace([283, 286, 281, 1215, 1211, 1209], 546)
femmesh.addFace([278, 277, 281, 1191, 1193, 1197], 547)
femmesh.addFace([278, 145, 277, 1199, 1195, 1191], 548)
femmesh.addFace([273, 272, 275, 1174, 1176, 1177], 549)
femmesh.addFace([273, 280, 276, 1179, 1188, 1178], 550)
femmesh.addFace([273, 282, 280, 1180, 1207, 1179], 551)
femmesh.addFace([273, 275, 282, 1177, 1184, 1180], 552)
femmesh.addFace([268, 270, 272, 1160, 1170, 1161], 553)
femmesh.addFace([268, 267, 270, 1156, 1157, 1160], 554)
femmesh.addFace([263, 326, 262, 1146, 1144, 1142], 555)
femmesh.addFace([258, 321, 257, 1131, 1129, 1127], 556)
femmesh.addFace([258, 322, 321, 1132, 1328, 1131], 557)
femmesh.addFace([253, 292, 252, 1116, 1114, 1112], 558)
femmesh.addFace([248, 242, 236, 1084, 1067, 1068], 559)
femmesh.addFace([243, 159, 330, 1088, 1352, 1087], 560)
femmesh.addFace([243, 130, 159, 1089, 761, 1088], 561)
femmesh.addFace([155, 310, 327, 1300, 1298, 1337], 562)
femmesh.addFace([155, 154, 310, 830, 1299, 1300], 563)
femmesh.addFace([150, 149, 297, 815, 1252, 1251], 564)
femmesh.addFace([145, 144, 277, 800, 1194, 1195], 565)
femmesh.addFace([237, 247, 266, 1069, 1099, 1070], 566)
femmesh.addFace([342, 247, 246, 1100, 1096, 1098], 567)
femmesh.addFace([342, 266, 247, 1155, 1099, 1100], 568)
femmesh.addFace([342, 341, 266, 1375, 1154, 1155], 569)
femmesh.addFace([337, 340, 341, 1369, 1374, 1370], 570)
femmesh.addFace([337, 335, 332, 1365, 1357, 1358], 571)
femmesh.addFace([327, 316, 331, 1317, 1318, 1336], 572)
femmesh.addFace([327, 310, 316, 1298, 1297, 1317], 573)
femmesh.addFace([322, 306, 321, 1281, 1280, 1328], 574)
femmesh.addFace([317, 315, 312, 1314, 1305, 1306], 575)
femmesh.addFace([302, 305, 300, 1266, 1263, 1261], 576)
femmesh.addFace([302, 320, 305, 1268, 1277, 1266], 577)
femmesh.addFace([297, 296, 300, 1243, 1245, 1249], 578)
femmesh.addFace([297, 149, 296, 1252, 1247, 1243], 579)
femmesh.addFace([292, 291, 252, 1234, 1113, 1114], 580)
femmesh.addFace([287, 301, 285, 1230, 1224, 1221], 581)
femmesh.addFace([282, 290, 291, 1213, 1233, 1214], 582)
femmesh.addFace([282, 275, 290, 1184, 1186, 1213], 583)
femmesh.addFace([277, 280, 281, 1192, 1206, 1193], 584)
femmesh.addFace([277, 276, 280, 1187, 1188, 1192], 585)
femmesh.addFace([277, 144, 276, 1194, 1190, 1187], 586)
femmesh.addFace([262, 325, 261, 1143, 1141, 1139], 587)
femmesh.addFace([262, 326, 325, 1144, 1332, 1143], 588)
femmesh.addFace([257, 320, 256, 1128, 1126, 1124], 589)
femmesh.addFace([257, 321, 320, 1129, 1327, 1128], 590)
femmesh.addFace([252, 291, 251, 1113, 1111, 1109], 591)
femmesh.addFace([159, 158, 330, 842, 1351, 1352], 592)
femmesh.addFace([149, 148, 296, 812, 1246, 1247], 593)
femmesh.addFace([144, 143, 276, 797, 1189, 1190], 594)
femmesh.addFace([341, 265, 266, 1153, 1151, 1154], 595)
femmesh.addFace([341, 340, 265, 1374, 1152, 1153], 596)
femmesh.addFace([336, 330, 335, 1350, 1349, 1364], 597)
femmesh.addFace([321, 305, 320, 1278, 1277, 1327], 598)
femmesh.addFace([321, 306, 305, 1280, 1276, 1278], 599)
femmesh.addFace([316, 310, 315, 1297, 1296, 1313], 600)
femmesh.addFace([296, 148, 295, 1246, 1242, 1239], 601)
femmesh.addFace([291, 290, 251, 1233, 1110, 1111], 602)
femmesh.addFace([271, 240, 241, 1080, 1078, 1082], 603)
femmesh.addFace([271, 270, 240, 1169, 1079, 1080], 604)
femmesh.addFace([251, 290, 250, 1110, 1108, 1106], 605)
femmesh.addFace([148, 147, 295, 809, 1241, 1242], 606)
femmesh.addFace([425, 412, 25, 1614, 1616, 1656], 607)
femmesh.addFace([425, 419, 412, 1642, 1613, 1614], 608)
femmesh.addFace([425, 420, 419, 1644, 1641, 1642], 609)
femmesh.addFace([425, 426, 420, 1655, 1645, 1644], 610)
femmesh.addFace([425, 26, 426, 1657, 1659, 1655], 611)
femmesh.addFace([425, 25, 26, 1656, 496, 1657], 612)
femmesh.addFace([420, 417, 419, 1637, 1636, 1641], 613)
femmesh.addFace([420, 418, 417, 1638, 1635, 1637], 614)
femmesh.addFace([420, 423, 418, 1643, 1640, 1638], 615)
femmesh.addFace([420, 426, 423, 1645, 1651, 1643], 616)
femmesh.addFace([415, 421, 416, 1628, 1631, 1626], 617)
femmesh.addFace([415, 418, 421, 1627, 1639, 1628], 618)
femmesh.addFace([415, 414, 418, 1621, 1623, 1627], 619)
femmesh.addFace([415, 264, 414, 1629, 1625, 1621], 620)
femmesh.addFace([415, 265, 264, 1630, 1148, 1629], 621)
femmesh.addFace([415, 416, 265, 1626, 1633, 1630], 622)
femmesh.addFace([410, 409, 22, 1604, 1606, 1608], 623)
femmesh.addFace([410, 400, 409, 1578, 1577, 1604], 624)
femmesh.addFace([410, 403, 400, 1587, 1576, 1578], 625)
femmesh.addFace([410, 411, 403, 1607, 1588, 1587], 626)
femmesh.addFace([410, 23, 411, 1609, 1611, 1607], 627)
femmesh.addFace([410, 22, 23, 1608, 487, 1609], 628)
femmesh.addFace([405, 380, 17, 1502, 1504, 1593], 629)
femmesh.addFace([405, 387, 380, 1531, 1501, 1502], 630)
femmesh.addFace([405, 388, 387, 1533, 1530, 1531], 631)
femmesh.addFace([405, 406, 388, 1592, 1534, 1533], 632)
femmesh.addFace([405, 18, 406, 1594, 1597, 1592], 633)
femmesh.addFace([405, 17, 18, 1593, 472, 1594], 634)
femmesh.addFace([400, 399, 409, 1573, 1575, 1577], 635)
femmesh.addFace([400, 397, 399, 1569, 1568, 1573], 636)
femmesh.addFace([400, 398, 397, 1570, 1567, 1569], 637)
femmesh.addFace([400, 403, 398, 1576, 1572, 1570], 638)
femmesh.addFace([395, 401, 396, 1559, 1562, 1557], 639)
femmesh.addFace([395, 398, 401, 1558, 1571, 1559], 640)
femmesh.addFace([395, 394, 398, 1552, 1554, 1558], 641)
femmesh.addFace([395, 260, 394, 1560, 1556, 1552], 642)
femmesh.addFace([395, 261, 260, 1561, 1136, 1560], 643)
femmesh.addFace([395, 396, 261, 1557, 1565, 1561], 644)
femmesh.addFace([390, 389, 392, 1535, 1537, 1538], 645)
femmesh.addFace([390, 384, 389, 1520, 1519, 1535], 646)
femmesh.addFace([390, 393, 384, 1539, 1521, 1520], 647)
femmesh.addFace([390, 397, 393, 1540, 1549, 1539], 648)
femmesh.addFace([390, 399, 397, 1541, 1568, 1540], 649)
femmesh.addFace([390, 392, 399, 1538, 1545, 1541], 650)
femmesh.addFace([385, 381, 371, 1506, 1474, 1475], 651)
femmesh.addFace([385, 382, 381, 1510, 1505, 1506], 652)
femmesh.addFace([385, 386, 382, 1524, 1511, 1510], 653)
femmesh.addFace([385, 388, 386, 1526, 1527, 1524], 654)
femmesh.addFace([385, 387, 388, 1525, 1530, 1526], 655)
femmesh.addFace([385, 371, 387, 1475, 1476, 1525], 656)
femmesh.addFace([380, 387, 373, 1501, 1482, 1481], 657)
femmesh.addFace([380, 16, 17, 1503, 469, 1504], 658)
femmesh.addFace([380, 379, 16, 1498, 1500, 1503], 659)
femmesh.addFace([380, 373, 379, 1481, 1480, 1498], 660)
femmesh.addFace([375, 374, 11, 1483, 1485, 1487], 661)
femmesh.addFace([375, 360, 374, 1435, 1434, 1483], 662)
femmesh.addFace([375, 361, 360, 1437, 1433, 1435], 663)
femmesh.addFace([375, 376, 361, 1486, 1438, 1437], 664)
femmesh.addFace([375, 12, 376, 1488, 1490, 1486], 665)
femmesh.addFace([375, 11, 12, 1487, 454, 1488], 666)
femmesh.addFace([370, 367, 372, 1462, 1463, 1471], 667)
femmesh.addFace([370, 364, 367, 1450, 1449, 1462], 668)
femmesh.addFace([370, 365, 364, 1453, 1448, 1450], 669)
femmesh.addFace([370, 371, 365, 1470, 1454, 1453], 670)
femmesh.addFace([370, 373, 371, 1472, 1473, 1470], 671)
femmesh.addFace([370, 372, 373, 1471, 1477, 1472], 672)
femmesh.addFace([365, 253, 364, 1456, 1452, 1448], 673)
femmesh.addFace([365, 254, 253, 1457, 1115, 1456], 674)
femmesh.addFace([365, 381, 254, 1455, 1507, 1457], 675)
femmesh.addFace([365, 371, 381, 1454, 1474, 1455], 676)
femmesh.addFace([360, 357, 374, 1424, 1425, 1434], 677)
femmesh.addFace([360, 356, 357, 1423, 1421, 1424], 678)
femmesh.addFace([360, 358, 356, 1427, 1422, 1423], 679)
femmesh.addFace([360, 361, 358, 1433, 1428, 1427], 680)
femmesh.addFace([355, 359, 362, 1417, 1430, 1418], 681)
femmesh.addFace([355, 358, 359, 1416, 1426, 1417], 682)
femmesh.addFace([355, 354, 358, 1411, 1413, 1416], 683)
femmesh.addFace([355, 249, 354, 1419, 1415, 1411], 684)
femmesh.addFace([355, 250, 249, 1420, 1103, 1419], 685)
femmesh.addFace([355, 362, 250, 1418, 1441, 1420], 686)
femmesh.addFace([350, 349, 422, 1396, 1398, 1400], 687)
femmesh.addFace([350, 424, 351, 1401, 1403, 1399], 688)
femmesh.addFace([350, 422, 424, 1400, 1649, 1401], 689)
femmesh.addFace([345, 356, 344, 1384, 1382, 1380], 690)
femmesh.addFace([345, 357, 356, 1385, 1421, 1384], 691)
femmesh.addFace([345, 346, 357, 1383, 1387, 1385], 692)
femmesh.addFace([264, 263, 414, 1145, 1624, 1625], 693)
femmesh.addFace([259, 258, 393, 1130, 1550, 1551], 694)
femmesh.addFace([259, 394, 260, 1555, 1556, 1133], 695)
femmesh.addFace([259, 393, 394, 1551, 1548, 1555], 696)
femmesh.addFace([254, 381, 255, 1507, 1508, 1118], 697)
femmesh.addFace([249, 248, 354, 1101, 1414, 1415], 698)
femmesh.addFace([25, 412, 24, 1616, 1615, 493], 699)
femmesh.addFace([20, 407, 19, 1599, 1600, 478], 700)
femmesh.addFace([20, 408, 407, 1602, 1598, 1599], 701)
femmesh.addFace([20, 21, 408, 481, 1603, 1602], 702)
femmesh.addFace([15, 378, 14, 1497, 1496, 463], 703)
femmesh.addFace([15, 379, 378, 1499, 1495, 1497], 704)
femmesh.addFace([15, 16, 379, 466, 1500, 1499], 705)
femmesh.addFace([10, 347, 1, 1391, 1390, 430], 706)
femmesh.addFace([10, 374, 347, 1484, 1389, 1391], 707)
femmesh.addFace([10, 11, 374, 451, 1485, 1484], 708)
femmesh.addFace([424, 422, 421, 1649, 1646, 1648], 709)
femmesh.addFace([424, 428, 351, 1654, 1404, 1403], 710)
femmesh.addFace([424, 427, 428, 1653, 1661, 1654], 711)
femmesh.addFace([424, 423, 427, 1650, 1652, 1653], 712)
femmesh.addFace([424, 421, 423, 1648, 1647, 1650], 713)
femmesh.addFace([419, 404, 412, 1591, 1590, 1613], 714)
femmesh.addFace([419, 402, 404, 1585, 1582, 1591], 715)
femmesh.addFace([419, 417, 402, 1636, 1584, 1585], 716)
femmesh.addFace([414, 417, 418, 1622, 1635, 1623], 717)
femmesh.addFace([414, 413, 417, 1617, 1618, 1622], 718)
femmesh.addFace([414, 263, 413, 1624, 1620, 1617], 719)
femmesh.addFace([409, 21, 22, 1605, 484, 1606], 720)
femmesh.addFace([409, 408, 21, 1601, 1603, 1605], 721)
femmesh.addFace([409, 399, 408, 1575, 1574, 1601], 722)
femmesh.addFace([404, 411, 412, 1589, 1610, 1590], 723)
femmesh.addFace([404, 403, 411, 1586, 1588, 1589], 724)
femmesh.addFace([404, 401, 403, 1581, 1580, 1586], 725)
femmesh.addFace([404, 402, 401, 1582, 1579, 1581], 726)
femmesh.addFace([399, 392, 408, 1545, 1547, 1574], 727)
femmesh.addFace([394, 397, 398, 1553, 1567, 1554], 728)
femmesh.addFace([394, 393, 397, 1548, 1549, 1553], 729)
femmesh.addFace([389, 391, 392, 1536, 1542, 1537], 730)
femmesh.addFace([389, 386, 391, 1528, 1529, 1536], 731)
femmesh.addFace([389, 383, 386, 1516, 1515, 1528], 732)
femmesh.addFace([389, 384, 383, 1519, 1514, 1516], 733)
femmesh.addFace([384, 393, 258, 1521, 1550, 1522], 734)
femmesh.addFace([384, 257, 383, 1523, 1517, 1514], 735)
femmesh.addFace([384, 258, 257, 1522, 1127, 1523], 736)
femmesh.addFace([379, 372, 378, 1479, 1478, 1495], 737)
femmesh.addFace([379, 373, 372, 1480, 1477, 1479], 738)
femmesh.addFace([374, 346, 347, 1388, 1386, 1389], 739)
femmesh.addFace([374, 357, 346, 1425, 1387, 1388], 740)
femmesh.addFace([369, 378, 372, 1469, 1478, 1467], 741)
femmesh.addFace([369, 377, 378, 1468, 1492, 1469], 742)
femmesh.addFace([369, 368, 377, 1464, 1466, 1468], 743)
femmesh.addFace([369, 366, 368, 1460, 1459, 1464], 744)
femmesh.addFace([369, 367, 366, 1461, 1458, 1460], 745)
femmesh.addFace([369, 372, 367, 1467, 1463, 1461], 746)
femmesh.addFace([364, 363, 367, 1443, 1445, 1449], 747)
femmesh.addFace([364, 252, 363, 1451, 1447, 1443], 748)
femmesh.addFace([364, 253, 252, 1452, 1112, 1451], 749)
femmesh.addFace([359, 358, 361, 1426, 1428, 1429], 750)
femmesh.addFace([359, 366, 362, 1431, 1440, 1430], 751)
femmesh.addFace([359, 368, 366, 1432, 1459, 1431], 752)
femmesh.addFace([359, 361, 368, 1429, 1436, 1432], 753)
femmesh.addFace([354, 356, 358, 1412, 1422, 1413], 754)
femmesh.addFace([354, 353, 356, 1408, 1409, 1412], 755)
femmesh.addFace([354, 248, 353, 1414, 1410, 1408], 756)
femmesh.addFace([349, 416, 422, 1397, 1632, 1398], 757)
femmesh.addFace([349, 348, 416, 1392, 1393, 1397], 758)
femmesh.addFace([344, 353, 343, 1381, 1377, 1376], 759)
femmesh.addFace([344, 356, 353, 1382, 1409, 1381], 760)
femmesh.addFace([263, 262, 413, 1142, 1619, 1620], 761)
femmesh.addFace([248, 343, 353, 1379, 1377, 1410], 762)
femmesh.addFace([248, 236, 343, 1068, 1378, 1379], 763)
femmesh.addFace([24, 411, 23, 1612, 1611, 490], 764)
femmesh.addFace([24, 412, 411, 1615, 1610, 1612], 765)
femmesh.addFace([19, 406, 18, 1596, 1597, 475], 766)
femmesh.addFace([19, 407, 406, 1600, 1595, 1596], 767)
femmesh.addFace([14, 377, 13, 1494, 1493, 460], 768)
femmesh.addFace([14, 378, 377, 1496, 1492, 1494], 769)
femmesh.addFace([3, 352, 28, 1407, 1406, 433], 770)
femmesh.addFace([428, 352, 351, 1405, 1402, 1404], 771)
femmesh.addFace([428, 28, 352, 1664, 1406, 1405], 772)
femmesh.addFace([428, 427, 28, 1661, 1663, 1664], 773)
femmesh.addFace([423, 426, 427, 1651, 1658, 1652], 774)
femmesh.addFace([423, 421, 418, 1647, 1639, 1640], 775)
femmesh.addFace([413, 402, 417, 1583, 1584, 1618], 776)
femmesh.addFace([413, 396, 402, 1564, 1563, 1583], 777)
femmesh.addFace([413, 262, 396, 1619, 1566, 1564], 778)
femmesh.addFace([408, 392, 407, 1547, 1546, 1598], 779)
femmesh.addFace([403, 401, 398, 1580, 1571, 1572], 780)
femmesh.addFace([388, 391, 386, 1532, 1529, 1527], 781)
femmesh.addFace([388, 406, 391, 1534, 1543, 1532], 782)
femmesh.addFace([383, 382, 386, 1509, 1511, 1515], 783)
femmesh.addFace([383, 256, 382, 1518, 1513, 1509], 784)
femmesh.addFace([383, 257, 256, 1517, 1124, 1518], 785)
femmesh.addFace([373, 387, 371, 1482, 1476, 1473], 786)
femmesh.addFace([368, 376, 377, 1465, 1489, 1466], 787)
femmesh.addFace([368, 361, 376, 1436, 1438, 1465], 788)
femmesh.addFace([363, 362, 366, 1439, 1440, 1444], 789)
femmesh.addFace([363, 251, 362, 1446, 1442, 1439], 790)
femmesh.addFace([363, 252, 251, 1447, 1109, 1446], 791)
femmesh.addFace([363, 366, 367, 1444, 1458, 1445], 792)
femmesh.addFace([348, 266, 416, 1394, 1634, 1393], 793)
femmesh.addFace([348, 237, 266, 1395, 1070, 1394], 794)
femmesh.addFace([262, 261, 396, 1139, 1565, 1566], 795)
femmesh.addFace([28, 427, 27, 1663, 1662, 502], 796)
femmesh.addFace([13, 376, 12, 1491, 1490, 457], 797)
femmesh.addFace([13, 377, 376, 1493, 1489, 1491], 798)
femmesh.addFace([427, 426, 27, 1658, 1660, 1662], 799)
femmesh.addFace([422, 416, 421, 1632, 1631, 1646], 800)
femmesh.addFace([407, 391, 406, 1544, 1543, 1595], 801)
femmesh.addFace([407, 392, 391, 1546, 1542, 1544], 802)
femmesh.addFace([402, 396, 401, 1563, 1562, 1579], 803)
femmesh.addFace([382, 255, 381, 1512, 1508, 1505], 804)
femmesh.addFace([382, 256, 255, 1513, 1121, 1512], 805)
femmesh.addFace([362, 251, 250, 1442, 1106, 1441], 806)
femmesh.addFace([266, 265, 416, 1151, 1633, 1634], 807)
femmesh.addFace([27, 426, 26, 1660, 1659, 499], 808)
return True
| lgpl-2.1 | 7,943,723,131,867,216,000 | 51.745968 | 59 | 0.63486 | false |
Diti24/python-ivi | ivi/agilent/agilentDSO6054A.py | 1 | 1686 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent6000 import *
class agilentDSO6054A(agilent6000):
"Agilent InfiniiVision DSO6054A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO6054A')
super(agilentDSO6054A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 500e6
self._init_channels()
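# Usage sketch (hedged; the resource string below is a placeholder, not taken
# from this repository): python-ivi drivers are typically opened with a VISA
# resource identifier, e.g.
#   import ivi
#   scope = ivi.agilent.agilentDSO6054A("TCPIP0::192.168.1.100::INSTR")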
| mit | -8,314,626,367,419,297,000 | 37.318182 | 86 | 0.736655 | false |
mapnik/mapnik | scons/scons-local-4.1.0/SCons/Variables/EnumVariable.py | 4 | 3741 | # MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Option type for enumeration Variables.
This file defines the option type for SCons allowing only specified
input-values.
Usage example::
opts = Variables()
opts.Add(
EnumVariable(
'debug',
'debug output and symbols',
'no',
allowed_values=('yes', 'no', 'full'),
map={},
ignorecase=2,
)
)
...
if env['debug'] == 'full':
...
"""
__all__ = ['EnumVariable',]
import SCons.Errors
def _validator(key, val, env, vals):
if val not in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s. Valid values are: %s' % (key, val, vals))
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0):
"""
The input parameters describe an option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Variables.Add().
'key' and 'default' are the values to be passed on to Variables.Add().
'help' will be appended by the allowed values automatically
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (e.g. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed values. The 'converter' converts input values
according to the given 'map'-dictionary (unmapped input values are returned unchanged).
"""
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | 1,671,740,163,989,472,000 | 33.962617 | 122 | 0.677092 | false |
foursquare/pants | contrib/python/tests/python/pants_test/contrib/python/checks/tasks/checkstyle/test_common.py | 1 | 7647 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import textwrap
import unittest
from builtins import range, str
from pants_test.option.util.fakes import create_options
from pants.contrib.python.checks.tasks.checkstyle.common import (CheckstylePlugin, CheckSyntaxError,
Nit, OffByOneList, PythonFile)
FILE_TEXT = """
import ast
from os.path import (
join,
split,
)
import zookeeper
class Keeper(object):
def __init__(self):
self._session = None
def session(self):
return self._session
"""
class MinimalCheckstylePlugin(CheckstylePlugin):
"""Minimal Checkstyle plugin used to test PythonFile interactions in Plugin."""
def nits(self):
return []
class CommonTest(unittest.TestCase):
def _statement_for_testing(self):
"""Pytest Fixture to create a test python file from statement."""
return '\n'.join(textwrap.dedent(FILE_TEXT).split('\n')[1:])
def _python_file_for_testing(self):
"""Pytest Fixture to create a test python file from statement."""
return PythonFile.from_statement(self._statement_for_testing(), 'keeper.py')
def _plugin_for_testing(self):
options_object = create_options({'foo': {'skip': False}}).for_scope('foo')
return MinimalCheckstylePlugin(options_object, self._python_file_for_testing())
def test_python_file_name(self):
"""Test that filename attrib is getting set properly."""
self.assertEqual('keeper.py', self._python_file_for_testing().filename)
def test_syntax_error_in_parsing(self):
with self.assertRaises(CheckSyntaxError) as cm:
PythonFile.from_statement("""print('unfinished""",'myfile.py')
self.assertEqual(
"""E901:ERROR myfile.py:001 SyntaxError: EOL while scanning string literal\n"""
""" |print('unfinished""",
str(cm.exception.as_nit()))
def test_python_file_logical_lines(self):
"""Test that we get back logical lines we expect."""
self.assertEqual({
1: (1, 2, 0), # import ast
2: (2, 6, 0), # from os.path import (", " join,", " split,", ")
7: (7, 8, 0), # import zookeeper
10: (10, 11, 0), # class Keeper(object):
11: (11, 12, 2), # def __init__(self):
12: (12, 13, 4), # self._session = None
14: (14, 15, 2), # def session(self):
15: (15, 16, 4), # return self._session
}, self._python_file_for_testing().logical_lines)
def test_python_file_index_offset(self):
"""Test that we can not index into a python file with 0.
PythonFile is offset by one to match users expectations with file line numbering.
"""
with self.assertRaises(IndexError):
self._python_file_for_testing()[0]
def test_python_file_exceeds_index(self):
"""Test that we get an Index error when we exceed the line number."""
with self.assertRaises(IndexError):
self._python_file_for_testing()[len(self._statement_for_testing().split('\n')) + 1]
def test_line_retrieval(self):
"""Test that we get lines correctly when accessed by index."""
expected = [
["import ast"],
["from os.path import (", " join,", " split,", ")"],
[" join,"],
[" split,"],
[")"],
[""],
["import zookeeper"],
[""],
[""],
["class Keeper(object):"],
[" def __init__(self):"],
[" self._session = None"],
[""],
[" def session(self):"],
[" return self._session"],
[""]
]
self.assertEqual(expected, [self._python_file_for_testing()[x] for x in range(1,17)])
def test_rejoin(self):
"""Test that when we stitch the PythonFile back up we get back our input."""
self.assertEqual(self._statement_for_testing(), '\n'.join(self._python_file_for_testing()))
def test_off_by_one_enumeration(self):
"""Test that enumerate is offset by one."""
self.assertEqual(list(enumerate(self._statement_for_testing().split('\n'), 1)),
list(self._python_file_for_testing().enumerate()))
def test_line_number_return(self):
for ln_test_input, ln_test_expected in [
(['A123', 'You have a terrible taste in libraries'], None),
(['A123', 'You have a terrible taste in libraries', 7], '007'),
(['A123', 'You have a terrible taste in libraries', 2], '002-005'),
]:
error = self._plugin_for_testing().error(*ln_test_input)
self.assertEqual(ln_test_expected, error.line_number)
def test_code_return(self):
for code_test_input, code_test_expected in [
(['A123', 'You have a terrible taste in libraries'], 'A123'),
(['A321', 'You have a terrible taste in libraries', 2], 'A321'),
(['B321', 'You have a terrible taste in libraries', 7], 'B321'),
]:
error = self._plugin_for_testing().error(*code_test_input)
self.assertEqual(code_test_expected, error.code)
def test_error_severity(self):
"""Test that we get Nit.Error when calling error."""
error = self._plugin_for_testing().error('A123', 'Uh-oh this is bad')
self.assertEqual(Nit.ERROR, error.severity)
def test_warn_severity(self):
"""Test that we get Nit.WARNING when calling warning."""
error = self._plugin_for_testing().warning('A123', 'No worries, its just a warning')
self.assertEqual(Nit.WARNING, error.severity)
def test_style_error(self):
"""Test error with actual AST node.
    Verify that when we fetch a node from the AST and create an error we get the
same result as generating the error manually.
"""
plugin = MinimalCheckstylePlugin({}, PythonFile.from_statement(FILE_TEXT))
import_from = None
for node in ast.walk(self._python_file_for_testing().tree):
if isinstance(node, ast.ImportFrom):
import_from = node
ast_error = plugin.error('B380', "I don't like your from import!", import_from)
error = plugin.error('B380', "I don't like your from import!", 2)
self.assertEqual(str(ast_error), str(error))
def test_python_file_absolute_path_and_root_fails(self):
with self.assertRaises(ValueError):
PythonFile.parse('/absolute/dir', root='/other/abs/dir')
def test_index_error_with_data(self):
"""Test index errors with data in list."""
test_list = OffByOneList([])
for k in (0, 4):
with self.assertRaises(IndexError):
test_list[k]
def test_index_error_no_data(self):
"""Test that when start or end are -1,0, or 1 we get an index error."""
for index in [-1, 0, 1, slice(-1,0), slice(0,1)]:
test_list = OffByOneList([])
with self.assertRaises(IndexError):
test_list[index]
def test_empty_slice(self):
"""Test that we get an empty list if no elements in slice."""
test_list = OffByOneList([])
for s in (slice(1, 1), slice(1, 2), slice(-2, -1)):
self.assertEqual([], test_list[s])
def test_off_by_one(self):
"""Test that you fetch the value you put in."""
test_list = OffByOneList(['1', '2', '3'])
for k in (1, 2, 3):
self.assertEqual(str(k), test_list[k])
self.assertEqual([str(k)], test_list[k:k + 1])
self.assertEqual(k, test_list.index(str(k)))
self.assertEqual(1, test_list.count(str(k)))
self.assertEqual(['3', '2', '1'], list(reversed(test_list)))
def test_index_type(self):
test_list = OffByOneList([])
# Test index type sanity.
for value in (None, 2.0, type):
with self.assertRaises(TypeError):
test_list[value]
| apache-2.0 | 439,934,107,544,829,600 | 35.241706 | 100 | 0.630574 | false |
singhj/locality-sensitive-hashing | cass_result.py | 2 | 19282 | import sys, os, re, time, math, random, struct, zipfile, operator, csv, hashlib, uuid, pdb
from collections import defaultdict
dir_path = os.path.dirname([p for p in sys.path if p][0])
sys.path.insert(0, 'libs')
import logging
LOG_FILENAME = dir_path+'/CassDriver.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
from lsh.shingles.shingles import _get_list_of_shingles
from lsh.utils.similarity import compute_positive_hash
from bs4 import BeautifulSoup
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement, dict_factory
from cassandra import ConsistencyLevel, InvalidRequest
from utils.procache import Cache
from utils.levenshtein import levenshtein
shingle_cache = Cache(max_size = 1)
max_bits = 32
max_mask = 2**max_bits - 1
text_file_pattern = re.compile('^{"id":"([^"]*):html","text":"(.*)}', flags=re.DOTALL)
symbols = re.compile('\W+')
class UnableToCreateTable(Exception):
pass
class UnknownException(Exception):
pass
class CassandraInt(object):
@staticmethod
def to_db(number):
signed = struct.unpack('=l', struct.pack('=L', number & max_mask))[0]
return signed
@staticmethod
def fm_db(number):
return max_mask & number
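# Round-trip sketch (values stated for clarity, easy to verify by hand):
# Cassandra's int column is signed 32-bit, so the unsigned bucket ids used
# here are reinterpreted, e.g. CassandraInt.to_db(2**32 - 1) == -1 and
# CassandraInt.fm_db(-1) == 2**32 - 1.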
class CassandraTable(type):
"""
A singleton metaclass to ensure that the table exists in Cassandra
Inspired by http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
"""
_instances = {}
def __call__(cls, *args, **kwds):
if cls not in cls._instances:
try:
rows = session.execute('SELECT COUNT(*) FROM {name}'.format(name = kwds['name']))
logging.debug('Table %s exists', kwds['name'])
except InvalidRequest as err:
remsg = re.compile(r'code=(\d*).*')
found = remsg.search(err.message)
code = int('0'+found.group(1))
if code == 2200:
qstring = 'create table {name} ( {attrs} )'.format(name = kwds['name'], attrs = ', '.join(kwds['attrs']))
try:
session.execute(qstring)
except:
raise UnableToCreateTable(kwds['name'])
else:
raise UnknownException()
logging.debug('Table %s was created', kwds['name'])
cls._instances[cls] = super(CassandraTable, cls).__call__(*args, **{})
return cls._instances[cls]
class DatasetPB(object):
__metaclass__ = CassandraTable
attrs = [
'ds_key text primary key',
'source text',
'filename text',
'lsh_output text',
'eval_output text',
'count_output text',
'random_seeds list<bigint>',
'buckets list<int>',
'rows int',
'bands int',
'shingle_type ascii',
'minhash_modulo int',
]
def __init__(self):
qry = "SELECT * FROM {name} WHERE ds_key=?".format(name = self.__class__.__name__)
self.select = session.prepare(qry)
self.select.consistency_level = ConsistencyLevel.QUORUM
doc = Document(name = Document.__class__.__name__, attrs = Document.attrs)
self.doc_query = "SELECT * FROM Document WHERE ds_key=? AND doc_id=?"
self.doc_select = session.prepare(self.doc_query)
self.bkt_query = "SELECT buckets FROM Document WHERE ds_key=? AND doc_id=?"
self.bkt_select = session.prepare(self.bkt_query)
self.nns_query = "SELECT doc_id, minhashes FROM Document WHERE ds_key=? AND buckets CONTAINS ?"
self.nns_select = session.prepare(self.nns_query)
self.doc_ids_query = "SELECT doc_id FROM Document WHERE ds_key=? ALLOW FILTERING"
self.doc_ids_select = session.prepare(self.doc_ids_query)
def get(self, ds_key):
if ds_key:
ds = session.execute(self.select, [ds_key])
try:
if len(ds) == 1:
ds = ds[0]
for attr in ds:
if attr in ('random_seeds', 'buckets'):
if ds[attr]:
logging.info('retrieved dataset[%s][0] type %s, value %s', attr, type(ds[attr][0]), max_mask & ds[attr][0])
else:
logging.info('retrieved dataset[%s] type %s, value %s', attr, type(ds[attr]), ds[attr])
return ds
except:
pass
return ds
@classmethod
def find(cls, ds_key):
ds = DatasetPB(name = cls.__name__, attrs = cls.attrs)
dataset = ds.get(ds_key)
for k in dataset.keys():
setattr(ds, k, dataset[k])
try:
band_bits = int(math.ceil(math.log(ds.bands, 2)))
band_mask = (2**band_bits - 1)
setattr(ds, 'band_bits', band_bits)
setattr(ds, 'band_mask', band_mask)
setattr(ds, 'hash_mask', 2**(max_bits - band_bits)-1)
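            # Worked example (using the defaults from create() below): with
            # bands=15, band_bits = ceil(log2(15)) = 4, band_mask = 0b1111,
            # and hash_mask = 2**28 - 1, so a 32-bit bucket id carries the
            # band index in its top 4 bits and a 28-bit hash in the rest.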
except:
raise Exception('Unable to compute band_bits for dataset')
return ds
@classmethod
def create(cls, source, filename,
rows=15, bands=15, shingle_type='c4', minhash_modulo=7001):
# Make sure the underlying tables exist
ds = DatasetPB(name = cls.__name__, attrs = cls.attrs)
max_iters = 4
for iter_count in xrange(max_iters):
ds_key = '%04d' % (abs(hash(source + filename + ' ' * iter_count)) % (10 ** 4))
try:
# Does a dataset with this ID already exist?
this_ds = ds.get(ds_key)
if not this_ds:
break
if this_ds['filename'] == filename:
logging.debug("A dataset with %s already exists, reusing", filename)
for k in this_ds.keys():
setattr(ds, k, this_ds[k])
return ds
except ValueError:
raise Exception('WTF?')
ds.ds_key = ds_key
if iter_count == max_iters - 1:
raise Exception("Unable to create Dataset ID")
max_hashes = rows * bands
data = {
'ds_key': "'%s'" % ds_key,
'source': "'%s'" % source,
'filename': "'%s'" % filename,
'random_seeds': str([(max_mask & random.getrandbits(max_bits)) for _ in xrange(max_hashes)]).replace('L',''),
'rows': rows,
'bands': bands,
'shingle_type': "'%s'" % shingle_type,
'minhash_modulo': minhash_modulo,
}
data_keys = data.keys()
data_vals = ', '.join([str(data[k]) for k in data_keys])
data_keys = ', '.join(data_keys)
qstring = 'INSERT INTO %s (%s) VALUES (%s)' % (cls.__name__, data_keys, data_vals)
query = SimpleStatement(qstring, consistency_level=ConsistencyLevel.QUORUM)
session.execute(query)
return cls.find(ds_key)
def get_else_create_doc(self, doc_id):
try:
docs = session.execute(self.doc_select, [self.ds_key, doc_id])
if len(docs) == 1:
return True, docs[0]
except:
pass
doc = Document(name = 'Document', attrs = Document.attrs)
doc.ds_key = self.ds_key
doc.doc_id = doc_id
return False, doc
def get_doc(self, doc_id):
try:
docs = session.execute(self.doc_select, [self.ds_key, doc_id])
if len(docs) == 1:
doc = Document(name = 'Document', attrs = Document.attrs)
doc.ds_key = self.ds_key
doc.doc_id = doc_id
ret_dict = docs[0]
for k in ret_dict.keys():
setattr(doc, k, ret_dict[k])
return doc
except:
pass
return None
def get_nns(self, doc_id):
doc = self.get_doc(doc_id)
if not doc:
return []
bkts = [CassandraInt.fm_db(bkt) for bkt in doc.buckets]
mhs = {}
for bkt in bkts:
bkt_docs = session.execute(self.nns_select, [self.ds_key, CassandraInt.to_db(bkt)])
for bkt_doc in bkt_docs:
mhs[bkt_doc['doc_id']] = bkt_doc['minhashes']
del mhs[doc_id]
jac = {}
for doc_id2 in mhs.keys():
jac_min = reduce(lambda x, y: x+y, map(lambda a,b: a == b, doc.minhashes,mhs[doc_id2])) / float(len(doc.minhashes))
jac[doc_id2] = 1.0 - jac_min
if 0 == int(1000*time.time()) % 100:
logging.info('Sampling (1%%) Jaccard distance %s | %s: %6.2f', doc_id, doc_id2, jac[doc_id2])
return jac
def sample_doc_ids(self, ratio):
doc_ids = session.execute(self.doc_ids_select, [self.ds_key])
doc_ids = random.sample(doc_ids, int(0.5+ratio*len(doc_ids)))
return [_['doc_id'] for _ in doc_ids]
def create_doc(self, _id, text, stats):
(found, doc) = self.get_else_create_doc(_id)
stats['found'] = found
if found:
# if 0 == int(1000*time.time()) % 20:
# # print 5% of the documents on average
# logging.info('%s %s',doc['ds_key'], doc['doc_id'])
return doc
### Parse
t0 = time.time()
soup = BeautifulSoup(text.replace('\\n',' '))
[s.extract() for s in soup(['script', 'style'])]
text = soup.get_text(separator=' ', strip=True)
text = symbols.sub(' ', text.lower())
text = ' '.join(text.split())
doc.text = text
tParse = time.time() - t0
stats['parse'] = tParse
doc.dataset = self
doc.rows = self.rows
doc.hashes = doc.rows * self.bands
doc.seeds = list(self.random_seeds)
doc.modulo = self.minhash_modulo
doc.sh_type = self.shingle_type
max_hashes = self.rows * self.bands
doc.minhashes = doc.calc_minhashes()
tMinhash = time.time() - t0 - tParse
stats['minhash'] = tMinhash
doc.buckets = doc.bucketize(doc.minhashes)
tBucketize = time.time() - t0 - tParse - tMinhash
stats['bucketize'] = tBucketize
# if 0 == int(1000*time.time()) % 20:
# # print 5% of the documents on average
# logging.info('%s %s %s', doc.ds_key, doc.doc_id, doc.buckets)
data = {
'ds_key': "'%s'" % doc.ds_key,
'doc_id': "'%s'" % doc.doc_id,
'minhashes': str(doc.minhashes).replace('L',''),
'buckets': str(doc.buckets).replace('L',''),
}
data_keys = data.keys()
data_vals = ', '.join([str(data[k]) for k in data_keys])
data_keys = ', '.join(data_keys)
qstring = 'INSERT INTO %s (%s) VALUES (%s)' % ('Document', data_keys, data_vals)
document = session.execute(qstring)
tCassWrite = time.time() - t0 - tParse - tMinhash - tBucketize
stats['cassandra'] = tCassWrite
doc_data = {
'ds_key': "'%s'" % doc.ds_key,
'doc_id': "'%s'" % doc.doc_id,
'buckets': doc.buckets,
'minhashes': doc.minhashes,
}
return doc_data
class Document(object):
__metaclass__ = CassandraTable
attrs = [
'ds_key text',
'doc_id text',
'buckets list<int>',
'minhashes list<int>',
'PRIMARY KEY (doc_id, ds_key)',
]
@classmethod
def create(cls):
# Make sure the underlying tables exist
doc = Document(name = cls.__name__, attrs = cls.attrs)
query = 'create index if not exists doc_buckets on %s.Document (buckets)' % keyspace
session.execute(query)
def calc_minhashes(self):
def minhashes_for_shingles(shingles):
def calc_onehash(shingle, seed):
def c4_hash(shingle):
h = struct.unpack('<i',shingle)[0]
hash_val = h & max_mask
return hash_val
# hash_val = shingle_cache.get(shingle)
# if hash_val:
# return hash_val
# h = struct.unpack('<i',shingle)[0]
# hash_val = h & max_mask
# shingle_cache.set(shingle, hash_val)
# return hash_val
if self.sh_type == 'c4':
return operator.xor(c4_hash(shingle), long(seed)) % self.modulo
else:
return operator.xor(compute_positive_hash(shingle), long(seed)) % self.modulo
minhashes = [max_mask for _ in xrange(self.hashes)]
for shingle in shingles:
for hno in xrange(self.hashes):
h_value = calc_onehash(shingle, self.seeds[hno])
minhashes[hno] = min(h_value, minhashes[hno])
return minhashes
##########################################
shingles = self.shingles()
minhashes = minhashes_for_shingles(shingles)
return minhashes
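    # Standard minhash property, stated here for clarity: the probability
    # that any single minhash slot agrees between two documents is
    # (approximately, given the modulo reduction) their Jaccard similarity,
    # which is why get_nns() above estimates distance as
    # 1 - (matching slots / total slots).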
def shingles(self):
return self.text.split() if self.sh_type=='w' else set(_get_list_of_shingles(self.text))
def bucketize(self, minhashes):
buckets = []
band_bits = self.dataset.band_bits
band_mask = self.dataset.band_mask
hash_mask = self.dataset.hash_mask
for band in xrange(self.dataset.bands):
band_hash = (band_mask & band) * (hash_mask + 1)
minhashes_in_band = [minhashes[band*self.rows + row] for row in xrange(self.rows)]
minhashes_into_a_string = '-'.join([str(mh) for mh in minhashes_in_band])
bucket = band_hash | (hash_mask & int(hashlib.md5(minhashes_into_a_string).hexdigest(), 16))
buckets.append(CassandraInt.to_db(bucket))
return buckets
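# Bucket layout sketch (derived from bucketize() above, default parameters
# assumed): for band b of 15 with rows=15, the band's 15 minhashes are joined
# as 'mh0-mh1-...-mh14', md5-hashed, masked to 28 bits, and OR-ed with
# (b << 28); two documents share a bucket only when an entire band of
# minhashes matches exactly.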
def main():
"""
Read input zip file, minhash the documents in it and put them in buckets
The zip file should have been created with data_prep/prepare_blobstore_zips
"""
try:
filename = os.path.abspath(sys.argv[1])
except IndexError:
print 'filename not provided'
exit(1)
try:
zip_reader = zipfile.ZipFile(filename)
except IOError:
print 'unable to read file {file}'.format(file = filename)
exit(1)
except zipfile.BadZipfile:
print 'file {file} is not a zip file'.format(file = filename)
exit(1)
infolist = zip_reader.infolist()
mtxname = filename.replace('.zip', '.matrix.csv')
dummydoc = Document.create() # force the creation of the table
dataset = DatasetPB.create('bash', filename) # force the creation of the table and filling it with a row
# logging.debug('%s %s', dataset.ds_key, dataset.filename)
dataset = DatasetPB.find(dataset.ds_key)
start = time.time()
all_stats = defaultdict(float)
new_docs_count = 0
docs_cache = Cache(max_size = 15)
buckets = set()
for info in infolist:
with zip_reader.open(info) as file_reader:
logging.debug('Reading file %s', info.filename)
stats = {}
for line in file_reader.readlines():
found_pattern = text_file_pattern.search(line)
doc_id = found_pattern.group(1)
html = found_pattern.group(2)
udata=html.decode("utf-8")
html=udata.encode("ascii","ignore")
html = html.replace('\\n',' ').replace('\\t',' ').replace("'", "''")
doc = dataset.create_doc(doc_id, html, stats)
buckets |= set(doc['buckets'])
docs_cache.set(doc_id, (html, doc['buckets'] if doc['buckets'] else [], doc['minhashes']))
if not stats['found']:
new_docs_count += 1
for stat in stats:
if stat != 'found':
all_stats[stat] += stats[stat]
stats = {}
end = time.time()
if new_docs_count:
logging.info('File %s %d seconds, stats: %s over %d docs', info.filename, int(0.5+end-start), all_stats, new_docs_count)
start = end
if new_docs_count:
for stat in all_stats:
if stat != 'found':
all_stats[stat] /= new_docs_count
logging.info('Average stats: %s over %d docs', all_stats, new_docs_count)
    buckets = list(buckets)  # [:200]
with open(mtxname, 'wb') as mtx_handler:
fileout = csv.writer(mtx_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
fileout.writerow([' '] + sorted(list(buckets)))
row_count = 0
for info in infolist:
with zip_reader.open(info) as file_reader:
logging.debug('Reading file %s', info.filename)
stats = {}
for line in file_reader.readlines():
found_pattern = text_file_pattern.search(line)
doc_id = found_pattern.group(1)
doc = dataset.create_doc(doc_id, '', stats)
doc_buckets = doc['buckets']
x_marks = [('n' if _ in doc_buckets else '') for _ in buckets]
fileout.writerow(["'"+doc_id]+x_marks)
row_count += 1
if row_count >= 200:
break
if row_count >= 200:
break
outname = filename.replace('.zip', '.dists.csv')
doc_ids = docs_cache.keys()
with open(outname, 'wb') as out_handler:
fileout = csv.writer(out_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
fileout.writerow(['doc_i', 'doc_j', 'com_bkts', 'jac_dist'])
for idx in xrange(len(doc_ids)):
(ihtml, ibkts, imhs) = docs_cache.get(doc_ids[idx])
for jdx in xrange(idx+1, len(doc_ids)):
(jhtml, jbkts, jmhs) = docs_cache.get(doc_ids[jdx])
com_bkts = len(set(ibkts) & set(jbkts))
jac_dist = 1.0 - reduce(lambda x, y: x+y, map(lambda a,b: a == b, imhs,jmhs)) / float(len(imhs))
# if jac_dist <= 0.1:
# lev_pick = 50
# else:
# lev_pick = 100
# if 0 == int(str(uuid.uuid4()).replace('-',''), 16) % lev_pick:
# lev_dist = '%8d' % levenshtein(ihtml, jhtml)
# else:
# lev_dist = '...xx...'
lev_dist = ''
logging.debug(' %s | %s, %3d %6.3f %s %s', doc_ids[idx], doc_ids[jdx],
com_bkts, jac_dist, lev_dist, sorted(list(set(ibkts) & set(jbkts))))
csv_line = [doc_ids[idx], doc_ids[jdx], com_bkts, jac_dist, lev_dist]
csv_line.extend(sorted(list(set(ibkts) & set(jbkts))))
fileout.writerow(csv_line)
cluster = Cluster()
keyspace = 'datathinks'
session = cluster.connect(keyspace)
session.row_factory = dict_factory
if __name__ == "__main__":
main()
| mit | 4,054,122,226,710,723,000 | 40.466667 | 139 | 0.524946 | false |
polyaxon/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/api/queues_v1_api.py | 1 | 61918 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class QueuesV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_queue(self, owner, agent, body, **kwargs): # noqa: E501
"""Create queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_queue(owner, agent, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str agent: Agent that consumes the queue (required)
:param V1Queue body: Queue body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Queue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_queue_with_http_info(owner, agent, body, **kwargs) # noqa: E501
def create_queue_with_http_info(self, owner, agent, body, **kwargs): # noqa: E501
"""Create queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_queue_with_http_info(owner, agent, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str agent: Agent that consumes the queue (required)
:param V1Queue body: Queue body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Queue, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'agent',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_queue" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `create_queue`") # noqa: E501
# verify the required parameter 'agent' is set
if self.api_client.client_side_validation and ('agent' not in local_var_params or # noqa: E501
local_var_params['agent'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `agent` when calling `create_queue`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_queue`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'agent' in local_var_params:
path_params['agent'] = local_var_params['agent'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{agent}/queues', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Queue', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_queue(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Delete queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_queue(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str uuid: Uuid identifier of the sub-entity (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_queue_with_http_info(owner, entity, uuid, **kwargs) # noqa: E501
def delete_queue_with_http_info(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Delete queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_queue_with_http_info(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str uuid: Uuid identifier of the sub-entity (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'entity',
'uuid'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_queue" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_queue`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `delete_queue`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `delete_queue`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{entity}/queues/{uuid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_queue(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Get queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_queue(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str uuid: Uuid identifier of the sub-entity (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Queue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_queue_with_http_info(owner, entity, uuid, **kwargs) # noqa: E501
def get_queue_with_http_info(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Get queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_queue_with_http_info(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
        :param str uuid: UUID identifier of the sub-entity (required)
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Queue, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'entity',
'uuid'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_queue" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_queue`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `get_queue`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `get_queue`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{entity}/queues/{uuid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Queue', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
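
    # Hypothetical usage sketch: assuming `api` is an instance of this API
    # class, the plain wrapper returns the parsed V1Queue, while the
    # *_with_http_info variant also exposes the status code and headers:
    #
    #   queue = api.get_queue("acme", "agent-1", "queue-uuid")
    #   queue, status, headers = api.get_queue_with_http_info(
    #       "acme", "agent-1", "queue-uuid")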

    def list_organization_queue_names(self, owner, **kwargs): # noqa: E501
"""List organization level queues names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_organization_queue_names(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListQueuesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_organization_queue_names_with_http_info(owner, **kwargs) # noqa: E501

    def list_organization_queue_names_with_http_info(self, owner, **kwargs): # noqa: E501
"""List organization level queues names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_organization_queue_names_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param bool no_page: No pagination.
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListQueuesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_organization_queue_names" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_organization_queue_names`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/queues/names', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListQueuesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
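
    # Hypothetical usage sketch: assuming `api` is an instance of this API
    # class, the optional keyword arguments drive pagination and filtering;
    # the sort expression below is illustrative only:
    #
    #   resp = api.list_organization_queue_names(
    #       "acme", offset=0, limit=20, sort="-created_at")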

    def list_organization_queues(self, owner, **kwargs): # noqa: E501
"""List organization level queues # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_organization_queues(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListQueuesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_organization_queues_with_http_info(owner, **kwargs) # noqa: E501

    def list_organization_queues_with_http_info(self, owner, **kwargs): # noqa: E501
"""List organization level queues # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_organization_queues_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param bool no_page: No pagination.
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListQueuesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_organization_queues" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_organization_queues`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/queues', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListQueuesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
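
    # Hypothetical usage sketch: same pagination knobs as the names endpoint,
    # but full queue objects come back in the V1ListQueuesResponse:
    #
    #   resp = api.list_organization_queues("acme", limit=50, no_page=False)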

    def list_queue_names(self, owner, name, **kwargs): # noqa: E501
"""List queues names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_queue_names(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListQueuesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_queue_names_with_http_info(owner, name, **kwargs) # noqa: E501

    def list_queue_names_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""List queues names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_queue_names_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListQueuesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'offset',
'limit',
'sort',
'query',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_queue_names" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_queue_names`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `list_queue_names`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{name}/queues/names', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListQueuesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
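
    # Hypothetical usage sketch: assuming `api` is an instance of this API
    # class; the `query` value below is an illustrative filter expression,
    # not a documented query syntax:
    #
    #   resp = api.list_queue_names("acme", "agent-1", query="name:build")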

    def list_queues(self, owner, name, **kwargs): # noqa: E501
"""List queues # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_queues(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListQueuesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_queues_with_http_info(owner, name, **kwargs) # noqa: E501

    def list_queues_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""List queues # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_queues_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
        :param str sort: Sort field to order the search.
        :param str query: Query to filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListQueuesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'offset',
'limit',
'sort',
'query',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_queues" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_queues`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `list_queues`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{name}/queues', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListQueuesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
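
    # Hypothetical usage sketch: listing the full queue objects consumed by
    # one agent, with pagination handled via offset/limit:
    #
    #   resp = api.list_queues("acme", "agent-1", offset=0, limit=20)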

    def patch_queue(self, owner, agent, queue_uuid, body, **kwargs): # noqa: E501
"""Patch queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_queue(owner, agent, queue_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str agent: Agent that consumes the queue (required)
:param str queue_uuid: UUID (required)
:param V1Queue body: Queue body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Queue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_queue_with_http_info(owner, agent, queue_uuid, body, **kwargs) # noqa: E501

    def patch_queue_with_http_info(self, owner, agent, queue_uuid, body, **kwargs): # noqa: E501
"""Patch queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_queue_with_http_info(owner, agent, queue_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str agent: Agent that consumes the queue (required)
:param str queue_uuid: UUID (required)
:param V1Queue body: Queue body (required)
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Queue, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'agent',
'queue_uuid',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_queue" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `patch_queue`") # noqa: E501
# verify the required parameter 'agent' is set
if self.api_client.client_side_validation and ('agent' not in local_var_params or # noqa: E501
local_var_params['agent'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `agent` when calling `patch_queue`") # noqa: E501
# verify the required parameter 'queue_uuid' is set
if self.api_client.client_side_validation and ('queue_uuid' not in local_var_params or # noqa: E501
local_var_params['queue_uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `queue_uuid` when calling `patch_queue`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_queue`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'agent' in local_var_params:
path_params['agent'] = local_var_params['agent'] # noqa: E501
if 'queue_uuid' in local_var_params:
path_params['queue.uuid'] = local_var_params['queue_uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{agent}/queues/{queue.uuid}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Queue', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
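
    # Hypothetical usage sketch: assuming V1Queue is importable from this
    # SDK's models, a PATCH sends only the fields that are set on the body:
    #
    #   body = V1Queue(name="preemptible")  # fields to patch
    #   queue = api.patch_queue("acme", "agent-1", "queue-uuid", body)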

    def update_queue(self, owner, agent, queue_uuid, body, **kwargs): # noqa: E501
"""Update queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_queue(owner, agent, queue_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str agent: Agent that consumes the queue (required)
:param str queue_uuid: UUID (required)
:param V1Queue body: Queue body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Queue
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_queue_with_http_info(owner, agent, queue_uuid, body, **kwargs) # noqa: E501

    def update_queue_with_http_info(self, owner, agent, queue_uuid, body, **kwargs): # noqa: E501
"""Update queue # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_queue_with_http_info(owner, agent, queue_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str agent: Agent that consumes the queue (required)
:param str queue_uuid: UUID (required)
:param V1Queue body: Queue body (required)
        :param _return_http_data_only: response data without HTTP status code
                                       and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Queue, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'agent',
'queue_uuid',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_queue" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `update_queue`") # noqa: E501
# verify the required parameter 'agent' is set
if self.api_client.client_side_validation and ('agent' not in local_var_params or # noqa: E501
local_var_params['agent'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `agent` when calling `update_queue`") # noqa: E501
# verify the required parameter 'queue_uuid' is set
if self.api_client.client_side_validation and ('queue_uuid' not in local_var_params or # noqa: E501
local_var_params['queue_uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `queue_uuid` when calling `update_queue`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `update_queue`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'agent' in local_var_params:
path_params['agent'] = local_var_params['agent'] # noqa: E501
if 'queue_uuid' in local_var_params:
path_params['queue.uuid'] = local_var_params['queue_uuid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/agents/{agent}/queues/{queue.uuid}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Queue', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
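
    # Hypothetical usage sketch: unlike patch_queue (PATCH), update_queue
    # issues a PUT and is meant to replace the full V1Queue resource:
    #
    #   queue = api.update_queue("acme", "agent-1", "queue-uuid", full_body)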