| id (int64, 0 to 458k) | file_name (string, len 4 to 119) | file_path (string, len 14 to 227) | content (string, len 24 to 9.96M) | size (int64, 24 to 9.96M) | language (1 class) | extension (14 classes) | total_lines (int64, 1 to 219k) | avg_line_length (float64, 2.52 to 4.63M) | max_line_length (int64, 5 to 9.91M) | alphanum_fraction (float64, 0 to 1) | repo_name (string, len 7 to 101) | repo_stars (int64, 100 to 139k) | repo_forks (int64, 0 to 26.4k) | repo_open_issues (int64, 0 to 2.27k) | repo_license (12 classes) | repo_extraction_date (433 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
800 | filter_max_depth.py | gak_pycallgraph/docs/guide/filtering/filter_max_depth.py | #!/usr/bin/env python
from pycallgraph import PyCallGraph
from pycallgraph import Config
from pycallgraph.output import GraphvizOutput
from banana import Banana
config = Config(max_depth=1)
graphviz = GraphvizOutput(output_file='filter_max_depth.png')
with PyCallGraph(output=graphviz, config=config):
banana = Banana()
banana.eat()
| 346 | Python | .py | 10 | 32.3 | 61 | 0.812689 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
801 | banana.py | gak_pycallgraph/docs/guide/filtering/banana.py | import time
class Banana:
def __init__(self):
pass
def eat(self):
self.secret_function()
self.chew()
self.swallow()
def secret_function(self):
time.sleep(0.2)
def chew(self):
pass
def swallow(self):
pass
| 287 | Python | .py | 14 | 13.571429 | 30 | 0.556391 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
802 | filter_none.py | gak_pycallgraph/docs/guide/filtering/filter_none.py | #!/usr/bin/env python
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
from banana import Banana
graphviz = GraphvizOutput(output_file='filter_none.png')
with PyCallGraph(output=graphviz):
banana = Banana()
banana.eat()
| 266 | Python | .py | 8 | 30.625 | 56 | 0.810277 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
803 | filter_exclude.py | gak_pycallgraph/docs/guide/filtering/filter_exclude.py | #!/usr/bin/env python
from pycallgraph import PyCallGraph
from pycallgraph import Config
from pycallgraph import GlobbingFilter
from pycallgraph.output import GraphvizOutput
from banana import Banana
config = Config()
config.trace_filter = GlobbingFilter(exclude=[
'pycallgraph.*',
'*.secret_function',
])
graphviz = GraphvizOutput(output_file='filter_exclude.png')
with PyCallGraph(output=graphviz, config=config):
banana = Banana()
banana.eat()
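# Illustrative variation (not part of the original guide): an include-only
# filter that keeps just the banana module's calls would look like this.
#
#   config.trace_filter = GlobbingFilter(include=['banana.*'])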
| 469 | Python | .py | 15 | 28.8 | 59 | 0.796875 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
804 | util.py | gak_pycallgraph/pycallgraph/util.py | class Util(object):
@staticmethod
def human_readable_bibyte(num):
num = float(num)
for x in ['B', 'KiB', 'MiB', 'GiB']:
if num < 1024 and num > -1024:
return '{:3.1f}{}'.format(num, x)
num /= 1024
return '{:3.1f}{}'.format(num, 'TiB')
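# Illustrative check of the helper above (an added example, not repo code):
if __name__ == '__main__':
    assert Util.human_readable_bibyte(512) == '512.0B'
    assert Util.human_readable_bibyte(2048) == '2.0KiB'
    assert Util.human_readable_bibyte(3 * 1024 ** 3) == '3.0GiB'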
| 308 | Python | .py | 9 | 25.111111 | 49 | 0.486577 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
805 | config.py | gak_pycallgraph/pycallgraph/config.py | import argparse
import sys
from .output import outputters
from .globbing_filter import GlobbingFilter
from .grouper import Grouper
class Config(object):
'''Handles configuration settings for pycallgraph, tracer, and each output
module. It also handles command line arguments.
'''
def __init__(self, **kwargs):
'''
You can set defaults in the constructor, e.g. Config(verbose=True)
'''
self.output = None
self.verbose = False
self.debug = False
self.groups = True
self.threaded = False
self.memory = False
# Filtering
self.include_stdlib = False
self.include_pycallgraph = False
self.max_depth = 99999
self.trace_filter = GlobbingFilter(
exclude=['pycallgraph.*'],
include=['*'],
)
# Grouping
self.trace_grouper = Grouper()
self.did_init = True
# Update the defaults with anything from kwargs
[setattr(self, k, v) for k, v in kwargs.iteritems()]
self.create_parser()
def log_verbose(self, text):
if self.verbose:
print(text)
def log_debug(self, text):
if self.debug:
print(text)
def add_module_arguments(self, usage):
subparsers = self.parser.add_subparsers(
help='OUTPUT_TYPE', dest='output')
parent_parser = self.create_parent_parser()
for name, cls in outputters.items():
cls.add_arguments(subparsers, parent_parser, usage)
def get_output(self):
if not self.output:
return
output = outputters[self.output]()
output.set_config(self)
return output
def parse_args(self, args=None):
self.parser.parse_args(args, namespace=self)
self.convert_filter_args()
def strip_argv(self):
sys.argv = [self.command] + self.command_args
def convert_filter_args(self):
if not self.include:
self.include = ['*']
if not self.include_pycallgraph:
self.exclude.append('pycallgraph.*')
self.trace_filter = GlobbingFilter(
include=self.include,
exclude=self.exclude,
)
def create_parser(self):
'''Used by the pycallgraph command line interface to parse
arguments.
'''
usage = 'pycallgraph [options] OUTPUT_TYPE [output_options] -- ' \
'SCRIPT.py [ARG ...]'
self.parser = argparse.ArgumentParser(
description='Python Call Graph profiles a Python script and '
'generates a call graph visualization.', usage=usage,
)
self.add_ungrouped_arguments()
self.add_filter_arguments()
self.add_module_arguments(usage)
def create_parent_parser(self):
'''Mixing subparsers with positional arguments can be done with a
parents option. Found via: http://stackoverflow.com/a/11109863/11125
'''
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
'command', metavar='SCRIPT',
help='The Python script file to profile',
)
parent_parser.add_argument(
'command_args', metavar='ARG', nargs='*',
help='Python script arguments.'
)
return parent_parser
def add_ungrouped_arguments(self):
self.parser.add_argument(
'-v', '--verbose', action='store_true', default=self.verbose,
help='Display informative messages while running')
self.parser.add_argument(
'-d', '--debug', action='store_true', default=self.debug,
help='Display debugging messages while running')
self.parser.add_argument(
'-t', '--threaded', action='store_true', default=self.threaded,
help='Process traces asynchronously (Experimental)')
self.parser.add_argument(
'-ng', '--no-groups', dest='groups', action='store_false',
default=self.groups, help='Do not group functions by module')
self.parser.add_argument(
'-s', '--stdlib', dest='include_stdlib', action='store_true',
default=self.include_stdlib,
help='Include standard library functions in the trace')
self.parser.add_argument(
'-m', '--memory', action='store_true', default=self.memory,
help='(Experimental) Track memory usage')
def add_filter_arguments(self):
group = self.parser.add_argument_group('filtering')
group.add_argument(
'-i', '--include', default=[], action='append',
help='Wildcard pattern of modules to include in the output. '
'You can have multiple include arguments.'
)
group.add_argument(
'-e', '--exclude', default=[], action='append',
help='Wildcard pattern of modules to exclude from the output. '
'You can have multiple exclude arguments.'
)
group.add_argument(
'--include-pycallgraph', default=self.include_pycallgraph,
action='store_true',
help='Do not automatically filter out pycallgraph',
)
group.add_argument(
'--max-depth', default=self.max_depth, type=int,
help='Maximum stack depth to trace',
)
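# Illustrative programmatic use (an added sketch; note that iteritems() above
# ties this module to Python 2):
#
#   config = Config(verbose=True, max_depth=5)   # kwargs become attributes
#   config.log_verbose('printed because verbose=True')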
| 5,388 | Python | .py | 133 | 30.804511 | 78 | 0.607163 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
806 | memory_profiler.py | gak_pycallgraph/pycallgraph/memory_profiler.py | """Profile the memory usage of a Python program"""
__version__ = '0.25'
_CMD_USAGE = "python -m memory_profiler script_file.py"
import time, sys, os, pdb
import warnings
import linecache
import inspect
import subprocess
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Process, Pipe
except ImportError:
from multiprocessing.dummy import Process, Pipe
try:
import psutil
def _get_memory(pid):
process = psutil.Process(pid)
try:
mem = float(process.get_memory_info()[0]) / (1024 ** 2)
except psutil.AccessDenied:
mem = -1
return mem
except ImportError:
warnings.warn("psutil module not found. memory_profiler will be slow")
if os.name == 'posix':
def _get_memory(pid):
# ..
# .. memory usage in MB ..
# .. this should work on both Mac and Linux ..
# .. subprocess.check_output appeared in 2.7, using Popen ..
# .. for backwards compatibility ..
out = subprocess.Popen(['ps', 'v', '-p', str(pid)],
stdout=subprocess.PIPE).communicate()[0].split(b'\n')
try:
vsz_index = out[0].split().index(b'RSS')
return float(out[1].split()[vsz_index]) / 1024
except:
return -1
else:
raise NotImplementedError('The psutil module is required for non-unix '
'platforms')
class Timer(Process):
"""
Fetch memory consumption over a time interval
"""
def __init__(self, monitor_pid, interval, pipe, *args, **kw):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.cont = True
super(Timer, self).__init__(*args, **kw)
def run(self):
m = _get_memory(self.monitor_pid)
timings = [m]
self.pipe.send(0) # we're ready
while not self.pipe.poll(self.interval):
m = _get_memory(self.monitor_pid)
timings.append(m)
self.pipe.send(timings)
def memory_usage(proc=-1, interval=0.0, timeout=None):
"""
Return the memory usage of a process or piece of code
Parameters
----------
proc : {int, string, tuple, subprocess.Popen}, optional
The process to monitor. Can be given by an integer/string
representing a PID, by a Popen object or by a tuple
representing a Python function. The tuple contains three
values (f, args, kw) and specifies to run the function
f(*args, **kw).
Set to -1 (default) for current process.
interval : float, optional
Interval at which measurements are collected.
timeout : float, optional
Maximum amount of time (in seconds) to wait before returning.
Returns
-------
mem_usage : list of floating-point values
memory usage, in MB. Its length is always < timeout / interval
"""
ret = []
if timeout is not None:
max_iter = int(timeout / interval)
elif isinstance(proc, int):
# external process and no timeout
max_iter = 1
else:
# for a Python function wait until it finishes
max_iter = float('inf')
if hasattr(proc, '__call__'):
proc = (proc, (), {})
if isinstance(proc, (list, tuple)):
if len(proc) == 1:
f, args, kw = (proc[0], (), {})
elif len(proc) == 2:
f, args, kw = (proc[0], proc[1], {})
elif len(proc) == 3:
f, args, kw = (proc[0], proc[1], proc[2])
else:
raise ValueError
aspec = inspect.getargspec(f)
n_args = len(aspec.args)
if aspec.defaults is not None:
n_args -= len(aspec.defaults)
if n_args != len(args):
raise ValueError(
'Function expects %s value(s) but %s were given'
% (n_args, len(args)))
child_conn, parent_conn = Pipe() # this will store Timer's results
p = Timer(os.getpid(), interval, child_conn)
p.start()
parent_conn.recv() # wait until we start getting memory
f(*args, **kw)
parent_conn.send(0) # finish timing
ret = parent_conn.recv()
p.join(5 * interval)
elif isinstance(proc, subprocess.Popen):
# external process, launched from Python
while True:
ret.append(_get_memory(proc.pid))
time.sleep(interval)
if timeout is not None:
max_iter -= 1
if max_iter == 0:
break
if proc.poll() is not None:
break
else:
# external process
if proc == -1:
proc = os.getpid()
if max_iter == -1:
max_iter = 1
counter = 0
while counter < max_iter:
counter += 1
ret.append(_get_memory(proc))
time.sleep(interval)
return ret
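# Illustrative usage of memory_usage() (an added sketch; `allocate` is a
# hypothetical workload, and psutil should be installed for fast sampling):
#
#   def allocate():
#       return [0] * 10 ** 6
#
#   usage = memory_usage((allocate, (), {}), interval=0.1)
#   print('peak: %.1f MB' % max(usage))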
# ..
# .. utility functions for line-by-line ..
def _find_script(script_name):
""" Find the script.
If the input is not a file, then $PATH will be searched.
"""
if os.path.isfile(script_name):
return script_name
path = os.getenv('PATH', os.defpath).split(os.pathsep)
for folder in path:
if folder == '':
continue
fn = os.path.join(folder, script_name)
if os.path.isfile(fn):
return fn
sys.stderr.write('Could not find script {0}\n'.format(script_name))
raise SystemExit(1)
class LineProfiler:
""" A profiler that records the amount of memory for each line """
def __init__(self, **kw):
self.functions = list()
self.code_map = {}
self.enable_count = 0
self.max_mem = kw.get('max_mem', None)
def __call__(self, func):
self.add_function(func)
f = self.wrap_function(func)
f.__module__ = func.__module__
f.__name__ = func.__name__
f.__doc__ = func.__doc__
f.__dict__.update(getattr(func, '__dict__', {}))
return f
def add_function(self, func):
""" Record line profiling information for the given Python function.
"""
try:
# func_code does not exist in Python3
code = func.__code__
except AttributeError:
import warnings
warnings.warn("Could not extract a code object for the object %r"
% (func,))
return
if code not in self.code_map:
self.code_map[code] = {}
self.functions.append(func)
def wrap_function(self, func):
""" Wrap a function to profile it.
"""
def f(*args, **kwds):
self.enable_by_count()
try:
result = func(*args, **kwds)
finally:
self.disable_by_count()
return result
return f
def run(self, cmd):
""" Profile a single executable statment in the main namespace.
"""
import __main__
main_dict = __main__.__dict__
return self.runctx(cmd, main_dict, main_dict)
def runctx(self, cmd, globals, locals):
""" Profile a single executable statement in the given namespaces.
"""
self.enable_by_count()
try:
exec(cmd, globals, locals)
finally:
self.disable_by_count()
return self
def runcall(self, func, *args, **kw):
""" Profile a single function call.
"""
# XXX where is this used ? can be removed ?
self.enable_by_count()
try:
return func(*args, **kw)
finally:
self.disable_by_count()
def enable_by_count(self):
""" Enable the profiler if it hasn't been enabled before.
"""
if self.enable_count == 0:
self.enable()
self.enable_count += 1
def disable_by_count(self):
""" Disable the profiler if the number of disable requests matches the
number of enable requests.
"""
if self.enable_count > 0:
self.enable_count -= 1
if self.enable_count == 0:
self.disable()
def trace_memory_usage(self, frame, event, arg):
"""Callback for sys.settrace"""
if event in ('line', 'return') and frame.f_code in self.code_map:
lineno = frame.f_lineno
if event == 'return':
lineno += 1
entry = self.code_map[frame.f_code].setdefault(lineno, [])
entry.append(_get_memory(os.getpid()))
return self.trace_memory_usage
def trace_max_mem(self, frame, event, arg):
# run into PDB as soon as memory is higher than MAX_MEM
if event in ('line', 'return') and frame.f_code in self.code_map:
c = _get_memory(os.getpid())
if c >= self.max_mem:
t = 'Current memory {0:.2f} MB exceeded the maximum '.format(c) + \
'of {0:.2f} MB\n'.format(self.max_mem)
sys.stdout.write(t)
sys.stdout.write('Stepping into the debugger \n')
frame.f_lineno -= 2
p = pdb.Pdb()
p.quitting = False
p.stopframe = frame
p.returnframe = None
p.stoplineno = frame.f_lineno - 3
p.botframe = None
return p.trace_dispatch
return self.trace_max_mem
def __enter__(self):
self.enable_by_count()
def __exit__(self, exc_type, exc_val, exc_tb):
self.disable_by_count()
def enable(self):
if self.max_mem is not None:
sys.settrace(self.trace_max_mem)
else:
sys.settrace(self.trace_memory_usage)
def disable(self):
self.last_time = {}
sys.settrace(None)
def show_results(prof, stream=None, precision=3):
if stream is None:
stream = sys.stdout
template = '{0:>6} {1:>12} {2:>12} {3:<}'
for code in prof.code_map:
lines = prof.code_map[code]
if not lines:
# .. measurements are empty ..
continue
filename = code.co_filename
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
stream.write('Filename: ' + filename + '\n\n')
if not os.path.exists(filename):
stream.write('ERROR: Could not find file ' + filename + '\n')
if filename.startswith("ipython-input") or filename.startswith("<ipython-input"):
print("NOTE: %mprun can only be used on functions defined in "
"physical files, and not in the IPython environment.")
continue
all_lines = linecache.getlines(filename)
sub_lines = inspect.getblock(all_lines[code.co_firstlineno - 1:])
linenos = range(code.co_firstlineno, code.co_firstlineno +
len(sub_lines))
lines_normalized = {}
header = template.format('Line #', 'Mem usage', 'Increment',
'Line Contents')
stream.write(header + '\n')
stream.write('=' * len(header) + '\n')
# move everything one frame up
keys = sorted(lines.keys())
k_old = keys[0] - 1
lines_normalized[keys[0] - 1] = lines[keys[0]]
for i in range(1, len(lines_normalized[keys[0] - 1])):
lines_normalized[keys[0] - 1][i] = -1.
k = keys.pop(0)
while keys:
lines_normalized[k] = lines[keys[0]]
for i in range(len(lines_normalized[k_old]),
len(lines_normalized[k])):
lines_normalized[k][i] = -1.
k_old = k
k = keys.pop(0)
first_line = sorted(lines_normalized.keys())[0]
mem_old = max(lines_normalized[first_line])
precision = int(precision)
template_mem = '{{0:{0}.{1}'.format(precision + 6, precision) + 'f} MB'
for i, l in enumerate(linenos):
mem = ''
inc = ''
if l in lines_normalized:
mem = max(lines_normalized[l])
inc = mem - mem_old
mem_old = mem
mem = template_mem.format(mem)
inc = template_mem.format(inc)
stream.write(template.format(l, mem, inc, sub_lines[i]))
stream.write('\n\n')
# A lprun-style %mprun magic for IPython.
def magic_mprun(self, parameter_s=''):
""" Execute a statement under the line-by-line memory profiler from the
memory_profiler module.
Usage:
%mprun -f func1 -f func2 <statement>
The given statement (which doesn't require quote marks) is run via the
LineProfiler. Profiling is enabled for the functions specified by the -f
options. The statistics will be shown side-by-side with the code through
the pager once the statement has completed.
Options:
-f <function>: LineProfiler only profiles functions and methods it is told
to profile. This option tells the profiler about these functions. Multiple
-f options may be used. The argument may be any expression that gives
a Python function or method object. However, one must be careful to avoid
spaces that may confuse the option parser. Additionally, functions defined
in the interpreter at the In[] prompt or via %run currently cannot be
displayed. Write these functions out to a separate file and import them.
One or more -f options are required to get any useful results.
-T <filename>: dump the text-formatted statistics with the code
side-by-side out to a text file.
-r: return the LineProfiler object after it has completed profiling.
"""
try:
from StringIO import StringIO
except ImportError: # Python 3.x
from io import StringIO
# Local imports to avoid hard dependency.
from distutils.version import LooseVersion
import IPython
ipython_version = LooseVersion(IPython.__version__)
if ipython_version < '0.11':
from IPython.genutils import page
from IPython.ipstruct import Struct
from IPython.ipapi import UsageError
else:
from IPython.core.page import page
from IPython.utils.ipstruct import Struct
from IPython.core.error import UsageError
# Escape quote markers.
opts_def = Struct(T=[''], f=[])
parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
opts, arg_str = self.parse_options(parameter_s, 'rf:T:', list_all=True)
opts.merge(opts_def)
global_ns = self.shell.user_global_ns
local_ns = self.shell.user_ns
# Get the requested functions.
funcs = []
for name in opts.f:
try:
funcs.append(eval(name, global_ns, local_ns))
except Exception as e:
raise UsageError('Could not find function %r.\n%s: %s' % (name,
e.__class__.__name__, e))
profile = LineProfiler()
for func in funcs:
profile(func)
# Add the profiler to the builtins for @profile.
try:
import builtins
except ImportError: # Python 2.x
import __builtin__ as builtins
if 'profile' in builtins.__dict__:
had_profile = True
old_profile = builtins.__dict__['profile']
else:
had_profile = False
old_profile = None
builtins.__dict__['profile'] = profile
try:
try:
profile.runctx(arg_str, global_ns, local_ns)
message = ''
except SystemExit:
message = "*** SystemExit exception caught in code being profiled."
except KeyboardInterrupt:
message = ("*** KeyboardInterrupt exception caught in code being "
"profiled.")
finally:
if had_profile:
builtins.__dict__['profile'] = old_profile
# Trap text output.
stdout_trap = StringIO()
show_results(profile, stdout_trap)
output = stdout_trap.getvalue()
output = output.rstrip()
if ipython_version < '0.11':
page(output, screen_lines=self.shell.rc.screen_length)
else:
page(output)
print(message,)
text_file = opts.T[0]
if text_file:
with open(text_file, 'w') as pfile:
pfile.write(output)
print('\n*** Profile printout saved to text file %s. %s' % (text_file,
message))
return_value = None
if 'r' in opts:
return_value = profile
return return_value
def _func_exec(stmt, ns):
# helper for magic_memit, just a function proxy for the exec
# statement
exec(stmt, ns)
# a timeit-style %memit magic for IPython
def magic_memit(self, line=''):
"""Measure memory usage of a Python statement
Usage, in line mode:
%memit [-r<R>t<T>] statement
Options:
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 1
-t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None
Examples
--------
::
In [1]: import numpy as np
In [2]: %memit np.zeros(1e7)
maximum of 1: 76.402344 MB per loop
In [3]: %memit np.ones(1e6)
maximum of 1: 7.820312 MB per loop
In [4]: %memit -r 10 np.empty(1e8)
maximum of 10: 0.101562 MB per loop
In [5]: %memit -t 3 while True: pass;
Subprocess timed out.
Subprocess timed out.
Subprocess timed out.
ERROR: all subprocesses exited unsuccessfully. Try again with the `-i`
option.
maximum of 1: -inf MB per loop
"""
opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False)
repeat = int(getattr(opts, 'r', 1))
if repeat < 1:
repeat = 1
timeout = int(getattr(opts, 't', 0))
if timeout <= 0:
timeout = None
mem_usage = []
for _ in range(repeat):
tmp = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout)
mem_usage.extend(tmp)
if mem_usage:
print('maximum of %d: %f MB per loop' % (repeat, max(mem_usage)))
else:
print('ERROR: could not read memory usage, try with a lower interval or more iterations')
def load_ipython_extension(ip):
"""This is called to load the module as an IPython extension."""
ip.define_magic('mprun', magic_mprun)
ip.define_magic('memit', magic_memit)
def profile(func, stream=None):
"""
Decorator that will run the function and print a line-by-line profile
"""
def wrapper(*args, **kwargs):
prof = LineProfiler()
val = prof(func)(*args, **kwargs)
show_results(prof, stream=stream)
return val
return wrapper
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(usage=_CMD_USAGE, version=__version__)
parser.disable_interspersed_args()
parser.add_option("--pdb-mmem", dest="max_mem", metavar="MAXMEM",
type="float", action="store",
help="step into the debugger when memory exceeds MAXMEM")
parser.add_option('--precision', dest="precision", type="int",
action="store", default=3,
help="precision of memory output in number of significant digits")
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
(options, args) = parser.parse_args()
prof = LineProfiler(max_mem=options.max_mem)
__file__ = _find_script(args[0])
try:
if sys.version_info[0] < 3:
import __builtin__
__builtin__.__dict__['profile'] = prof
ns = locals()
ns['profile'] = prof # shadow the profile decorator defined above
execfile(__file__, ns, ns)
else:
import builtins
builtins.__dict__['profile'] = prof
ns = locals()
ns['profile'] = prof # shadow the profile decorator defined above
exec(compile(open(__file__).read(), __file__, 'exec'), ns,
globals())
finally:
show_results(prof, precision=options.precision)
| 20,186 | Python | .py | 523 | 29.458891 | 97 | 0.580871 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
807 | metadata.py | gak_pycallgraph/pycallgraph/metadata.py | # A separate file from pycallgraph.py to avoid a circular import problem
__version__ = '1.0.1'
__copyright__ = 'Copyright Gerald Kaszuba 2007-2013'
__license__ = 'GPLv2'
__author__ = 'Gerald Kaszuba'
__email__ = '[email protected]'
__url__ = 'http://pycallgraph.slowchop.com/'
__credits__ = [
'Gerald Kaszuba',
]
| 322 | Python | .py | 10 | 30.7 | 71 | 0.684887 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
808 | __init__.py | gak_pycallgraph/pycallgraph/__init__.py | '''
Python Call Graph is a library and command line tool that visualises the flow
of your Python application.
See http://pycallgraph.slowchop.com/ for more information.
'''
from .metadata import __version__
from .metadata import __copyright__
from .metadata import __license__
from .metadata import __author__
from .metadata import __email__
from .metadata import __url__
from .metadata import __credits__
from .pycallgraph import PyCallGraph
from .exceptions import PyCallGraphException
from . import decorators
from .config import Config
from .globbing_filter import GlobbingFilter
from .grouper import Grouper
from .util import Util
from .color import Color
from .color import ColorException
| 697 | Python | .py | 21 | 32.095238 | 77 | 0.811573 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
809 | pycallgraph.py | gak_pycallgraph/pycallgraph/pycallgraph.py | import locale
from .output import Output
from .config import Config
from .tracer import AsyncronousTracer, SyncronousTracer
from .exceptions import PyCallGraphException
class PyCallGraph(object):
def __init__(self, output=None, config=None):
'''output can be a single Output instance or an iterable with many
of them. Example usage:
PyCallGraph(output=GraphvizOutput(), config=Config())
'''
locale.setlocale(locale.LC_ALL, '')
if output is None:
self.output = []
elif isinstance(output, Output):
self.output = [output]
else:
self.output = output
self.config = config or Config()
configured_output = self.config.get_output()
if configured_output:
self.output.append(configured_output)
self.reset()
def __enter__(self):
self.start()
def __exit__(self, type, value, traceback):
self.done()
def get_tracer_class(self):
if self.config.threaded:
return AsyncronousTracer
else:
return SyncronousTracer
def reset(self):
'''Resets all collected statistics. This is run automatically by
start(reset=True) and when the class is initialized.
'''
self.tracer = self.get_tracer_class()(self.output, config=self.config)
for output in self.output:
self.prepare_output(output)
def start(self, reset=True):
'''Begins a trace. Setting reset to True will reset all previously
recorded trace data.
'''
if not self.output:
raise PyCallGraphException(
'No outputs declared. Please see the '
'examples in the online documentation.'
)
if reset:
self.reset()
for output in self.output:
output.start()
self.tracer.start()
def stop(self):
'''Stops the currently running trace, if any.'''
self.tracer.stop()
def done(self):
'''Stops the trace and tells the outputters to generate their
output.
'''
self.stop()
self.generate()
def generate(self):
# If in threaded mode, wait for the processor thread to complete
self.tracer.done()
for output in self.output:
output.done()
def add_output(self, output):
self.output.append(output)
self.prepare_output(output)
def prepare_output(self, output):
output.sanity_check()
output.set_processor(self.tracer.processor)
output.reset()
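# Illustrative use of the explicit API above (an added sketch; the context
# manager simply calls start() on enter and done() on exit; run_workload is
# hypothetical):
#
#   pcg = PyCallGraph(output=GraphvizOutput())
#   pcg.start()      # raises PyCallGraphException if no outputs are declared
#   run_workload()
#   pcg.done()       # stop(), then generate output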
| 2,634 | Python | .py | 74 | 26.594595 | 78 | 0.614353 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
810 | color.py | gak_pycallgraph/pycallgraph/color.py | import colorsys
class ColorException(Exception):
pass
class Color(object):
def __init__(self, r, g, b, a=1):
self.r = r
self.g = g
self.b = b
self.a = a
self.validate_all()
@classmethod
def hsv(cls, h, s, v, a=1):
r, g, b = colorsys.hsv_to_rgb(h, s, v)
return cls(r, g, b, a)
def __str__(self):
return '<Color {}>'.format(self.rgba_web())
def validate_all(self):
self.validate('r')
self.validate('g')
self.validate('b')
self.validate('a')
def validate(self, attr):
v = getattr(self, attr)
if not 0 <= v <= 1:
raise ColorException('{} out of range 0 to 1: {}'.format(attr, v))
@property
def r255(self):
return int(self.r * 255)
@property
def g255(self):
return int(self.g * 255)
@property
def b255(self):
return int(self.b * 255)
@property
def a255(self):
return int(self.a * 255)
def rgb_web(self):
'''Returns a string with the RGB components as a HTML hex string.'''
return '#{0.r255:02x}{0.g255:02x}{0.b255:02x}'.format(self)
def rgba_web(self):
'''Returns a string with the RGBA components as a HTML hex string.'''
return '{0}{1.a255:02x}'.format(self.rgb_web(), self)
def rgb_csv(self):
'''Returns a string with the RGB components as CSV.'''
return '{0.r255},{0.g255},{0.b255}'.format(self)
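# Quick demonstration of the web-colour helpers (an added example):
if __name__ == '__main__':
    c = Color(1, 0.5, 0, a=0.8)
    print(c.rgb_web())             # #ff7f00
    print(c.rgba_web())            # #ff7f00cc
    print(Color.hsv(0.5, 1, 0.9))  # <Color #00e5e5ff>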
| 1,487 | Python | .py | 46 | 24.978261 | 78 | 0.562807 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
811 | globbing_filter.py | gak_pycallgraph/pycallgraph/globbing_filter.py | from fnmatch import fnmatch
class GlobbingFilter(object):
'''Filter module names using a set of globs.
Objects are matched against the exclude list first, then the include list.
Anything that passes through without matching either, is excluded.
'''
def __init__(self, include=None, exclude=None):
if include is None and exclude is None:
include = ['*']
exclude = []
elif include is None:
include = ['*']
elif exclude is None:
exclude = []
self.include = include
self.exclude = exclude
def __call__(self, full_name=None):
for pattern in self.exclude:
if fnmatch(full_name, pattern):
return False
for pattern in self.include:
if fnmatch(full_name, pattern):
return True
return False
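# Behaviour sketch (an added example): exclude is checked before include, and
# names matching neither list are rejected.
if __name__ == '__main__':
    f = GlobbingFilter(include=['banana.*'], exclude=['*.secret_function'])
    assert f('banana.Banana.eat')
    assert not f('banana.Banana.secret_function')
    assert not f('os.path.join')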
| 881 | Python | .py | 24 | 27.375 | 78 | 0.600707 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
812 | grouper.py | gak_pycallgraph/pycallgraph/grouper.py | from fnmatch import fnmatch
class Grouper(object):
'''Group module names.
By default, objects are grouped by their top-level module name. Additional
groups can be specified with the groups list and all objects will be
matched against it.
'''
def __init__(self, groups=None):
if groups is None:
groups = []
self.groups = groups
def __call__(self, full_name=None):
for pattern in self.groups:
if fnmatch(full_name, pattern):
if pattern[-2:] == ".*":
# a wildcard in the middle is probably meaningful, while at
# the end, it's only noise and can be removed
return pattern[:-2]
return pattern
return full_name.split('.', 1)[0]
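# Behaviour sketch (an added example): a trailing '.*' is stripped from matched
# group patterns; anything else falls back to its top-level module name.
if __name__ == '__main__':
    g = Grouper(groups=['pycallgraph.output.*'])
    assert g('pycallgraph.output.graphviz.GraphvizOutput') == 'pycallgraph.output'
    assert g('banana.Banana.eat') == 'banana'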
| 804 | Python | .py | 20 | 30.1 | 78 | 0.583548 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
813 | decorators.py | gak_pycallgraph/pycallgraph/decorators.py | import functools
from .pycallgraph import PyCallGraph
def trace(output=None, config=None):
def inner(func):
@functools.wraps(func)
def exec_func(*args, **kw_args):
with(PyCallGraph(output, config)):
return func(*args, **kw_args)
return exec_func
return inner
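# Example use of the decorator above (an added sketch; GraphvizOutput is the
# real outputter from this package, eat_banana and Banana are hypothetical):
#
#   from pycallgraph.output import GraphvizOutput
#
#   @trace(output=GraphvizOutput(output_file='eat.png'))
#   def eat_banana():
#       Banana().eat()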
| 324 | Python | .py | 10 | 24.9 | 46 | 0.640777 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
814 | tracer.py | gak_pycallgraph/pycallgraph/tracer.py | from __future__ import division
import inspect
import sys
import os
import time
from distutils import sysconfig
from collections import defaultdict
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from .util import Util
class SyncronousTracer(object):
def __init__(self, outputs, config):
self.processor = TraceProcessor(outputs, config)
self.config = config
def tracer(self, frame, event, arg):
self.processor.process(frame, event, arg, self.memory())
return self.tracer
def memory(self):
if self.config.memory:
from .memory_profiler import memory_usage
return int(memory_usage(-1, 0)[0] * 1000000)
def start(self):
sys.settrace(self.tracer)
def stop(self):
sys.settrace(None)
def done(self):
pass
class AsyncronousTracer(SyncronousTracer):
def start(self):
self.processor.start()
SyncronousTracer.start(self)
def tracer(self, frame, event, arg):
self.processor.queue(frame, event, arg, self.memory())
return self.tracer
def done(self):
self.processor.done()
self.processor.join()
class TraceProcessor(Thread):
'''
Contains a callback used by sys.settrace, which collects information about
function call count, time taken, etc.
'''
def __init__(self, outputs, config):
Thread.__init__(self)
self.trace_queue = Queue()
self.keep_going = True
self.outputs = outputs
self.config = config
self.updatables = [a for a in self.outputs if a.should_update()]
self.init_trace_data()
self.init_libpath()
def init_trace_data(self):
self.previous_event_return = False
# A mapping of which function called which other function
self.call_dict = defaultdict(lambda: defaultdict(int))
# Current call stack
self.call_stack = ['__main__']
# Counters for each function
self.func_count = defaultdict(int)
self.func_count_max = 0
self.func_count['__main__'] = 1
# Accumulative time per function
self.func_time = defaultdict(float)
self.func_time_max = 0
# Accumulative memory addition per function
self.func_memory_in = defaultdict(int)
self.func_memory_in_max = 0
# Accumulative memory addition per function once exited
self.func_memory_out = defaultdict(int)
self.func_memory_out_max = 0
# Keeps track of the start time of each call on the stack
self.call_stack_timer = []
self.call_stack_memory_in = []
self.call_stack_memory_out = []
def init_libpath(self):
self.lib_path = sysconfig.get_python_lib()
path = os.path.split(self.lib_path)
if path[1] == 'site-packages':
self.lib_path = path[0]
self.lib_path = self.lib_path.lower()
def queue(self, frame, event, arg, memory):
data = {
'frame': frame,
'event': event,
'arg': arg,
'memory': memory,
}
self.trace_queue.put(data)
def run(self):
while self.keep_going:
try:
data = self.trace_queue.get(timeout=0.1)
except Empty:
pass
self.process(**data)
def done(self):
while not self.trace_queue.empty():
time.sleep(0.1)
self.keep_going = False
def process(self, frame, event, arg, memory=None):
'''This function processes a trace result. Keeps track of
relationships between calls.
'''
if memory is not None and self.previous_event_return:
# Deal with memory when function has finished so local variables
# can be cleaned up
self.previous_event_return = False
if self.call_stack_memory_out:
full_name, m = self.call_stack_memory_out.pop(-1)
else:
full_name, m = (None, None)
# NOTE: Call stack is no longer the call stack that may be
# expected. Potentially need to store a copy of it.
if full_name and m:
call_memory = memory - m
self.func_memory_out[full_name] += call_memory
self.func_memory_out_max = max(
self.func_memory_out_max, self.func_memory_out[full_name]
)
if event == 'call':
keep = True
code = frame.f_code
# Stores all the parts of a human readable name of the current call
full_name_list = []
# Work out the module name
module = inspect.getmodule(code)
if module:
module_name = module.__name__
module_path = module.__file__
if not self.config.include_stdlib \
and self.is_module_stdlib(module_path):
keep = False
if module_name == '__main__':
module_name = ''
else:
module_name = ''
if module_name:
full_name_list.append(module_name)
# Work out the class name
try:
class_name = frame.f_locals['self'].__class__.__name__
full_name_list.append(class_name)
except (KeyError, AttributeError):
class_name = ''
# Work out the current function or method
func_name = code.co_name
if func_name == '?':
func_name = '__main__'
full_name_list.append(func_name)
# Create a readable representation of the current call
full_name = '.'.join(full_name_list)
if len(self.call_stack) > self.config.max_depth:
keep = False
# Load the trace filter, if any. 'keep' determines if we should
# ignore this call
if keep and self.config.trace_filter:
keep = self.config.trace_filter(full_name)
# Store the call information
if keep:
if self.call_stack:
src_func = self.call_stack[-1]
else:
src_func = None
self.call_dict[src_func][full_name] += 1
self.func_count[full_name] += 1
self.func_count_max = max(
self.func_count_max, self.func_count[full_name]
)
self.call_stack.append(full_name)
self.call_stack_timer.append(time.time())
if memory is not None:
self.call_stack_memory_in.append(memory)
self.call_stack_memory_out.append([full_name, memory])
else:
self.call_stack.append('')
self.call_stack_timer.append(None)
if event == 'return':
self.previous_event_return = True
if self.call_stack:
full_name = self.call_stack.pop(-1)
if self.call_stack_timer:
start_time = self.call_stack_timer.pop(-1)
else:
start_time = None
if start_time:
call_time = time.time() - start_time
self.func_time[full_name] += call_time
self.func_time_max = max(
self.func_time_max, self.func_time[full_name]
)
if memory is not None:
if self.call_stack_memory_in:
start_mem = self.call_stack_memory_in.pop(-1)
else:
start_mem = None
if start_mem:
call_memory = memory - start_mem
self.func_memory_in[full_name] += call_memory
self.func_memory_in_max = max(
self.func_memory_in_max,
self.func_memory_in[full_name],
)
def is_module_stdlib(self, file_name):
'''
Returns True if the file_name is in the lib directory. Used to check
if a function is in the standard library or not.
'''
return file_name.lower().startswith(self.lib_path)
def __getstate__(self):
'''Used when creating a pickle. Certain instance variables can't be
pickled and aren't used anyway.
'''
odict = self.__dict__.copy()
dont_keep = [
'outputs',
'config',
'updatables',
'lib_path',
]
for key in dont_keep:
del odict[key]
return odict
def groups(self):
grp = defaultdict(list)
for node in self.nodes():
grp[node.group].append(node)
for g in grp.iteritems():
yield g
def stat_group_from_func(self, func, calls):
stat_group = StatGroup()
stat_group.name = func
stat_group.group = self.config.trace_grouper(func)
stat_group.calls = Stat(calls, self.func_count_max)
stat_group.time = Stat(self.func_time.get(func, 0), self.func_time_max)
stat_group.memory_in = Stat(
self.func_memory_in.get(func, 0), self.func_memory_in_max
)
stat_group.memory_out = Stat(
self.func_memory_out.get(func, 0), self.func_memory_out_max
)
return stat_group
def nodes(self):
for func, calls in self.func_count.iteritems():
yield self.stat_group_from_func(func, calls)
def edges(self):
for src_func, dests in self.call_dict.iteritems():
if not src_func:
continue
for dst_func, calls in dests.iteritems():
edge = self.stat_group_from_func(dst_func, calls)
edge.src_func = src_func
edge.dst_func = dst_func
yield edge
class Stat(object):
'''Stores a "statistic" value, e.g. "time taken" along with the maximum
possible value of the value, which is used to calculate the fraction of 1.
The fraction is used for choosing colors.
'''
def __init__(self, value, total):
self.value = value
self.total = total
try:
self.fraction = value / total
except ZeroDivisionError:
self.fraction = 0
@property
def value_human_bibyte(self):
'''The value as a human-readable mebibyte string.'''
return Util.human_readable_bibyte(self.value)
class StatGroup(object):
pass
def simple_memoize(callable_object):
'''Simple memoization for functions without keyword arguments.
This is useful for mapping code objects to modules in this context.
inspect.getmodule() requires a number of system calls, which may slow down
the tracing considerably. Caching the mapping from code objects helps, since
there is *one* code object for each function, regardless of how many
simultaneous activation records there are.
In this context we can ignore keyword arguments, but a generic memoizer
ought to take care of that as well.
'''
cache = dict()
def wrapper(*rest):
if rest not in cache:
cache[rest] = callable_object(*rest)
return cache[rest]
return wrapper
inspect.getmodule = simple_memoize(inspect.getmodule)
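# Cache behaviour sketch for simple_memoize (an added example):
if __name__ == '__main__':
    calls = []

    def probe(x):
        calls.append(x)
        return x

    fast = simple_memoize(probe)
    fast(1), fast(1), fast(2)
    assert calls == [1, 2]  # the second fast(1) was served from the cache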
| 11,588 | Python | .py | 291 | 28.443299 | 79 | 0.567645 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
815 | graphviz.py | gak_pycallgraph/pycallgraph/output/graphviz.py | from __future__ import division
import tempfile
import os
import textwrap
import subprocess as sub
from ..metadata import __version__
from ..exceptions import PyCallGraphException
from ..color import Color
from .output import Output
class GraphvizOutput(Output):
def __init__(self, **kwargs):
self.tool = 'dot'
self.output_file = 'pycallgraph.png'
self.output_type = 'png'
self.font_name = 'Verdana'
self.font_size = 7
self.group_font_size = 10
self.group_border_color = Color(0, 0, 0, 0.8)
Output.__init__(self, **kwargs)
self.prepare_graph_attributes()
@classmethod
def add_arguments(cls, subparsers, parent_parser, usage):
defaults = cls()
subparser = subparsers.add_parser(
'graphviz', help='Graphviz generation',
parents=[parent_parser], usage=usage,
)
subparser.add_argument(
'-l', '--tool', dest='tool', default=defaults.tool,
help='The tool from Graphviz to use, e.g. dot, neato, etc.',
)
cls.add_output_file(
subparser, defaults, 'The generated Graphviz file'
)
subparser.add_argument(
'-f', '--output-format', type=str, default=defaults.output_type,
dest='output_type',
help='Image format to produce, e.g. png, ps, dot, etc. '
'See http://www.graphviz.org/doc/info/output.html for more.',
)
subparser.add_argument(
'--font-name', type=str, default=defaults.font_name,
help='Name of the font to be used',
)
subparser.add_argument(
'--font-size', type=int, default=defaults.font_size,
help='Size of the font to be used',
)
def sanity_check(self):
self.ensure_binary(self.tool)
def prepare_graph_attributes(self):
generated_message = '\\n'.join([
r'Generated by Python Call Graph v%s' % __version__,
r'http://pycallgraph.slowchop.com',
])
self.graph_attributes = {
'graph': {
'overlap': 'scalexy',
'fontname': self.font_name,
'fontsize': self.font_size,
'fontcolor': Color(0, 0, 0, 0.5).rgba_web(),
'label': generated_message,
},
'node': {
'fontname': self.font_name,
'fontsize': self.font_size,
'fontcolor': Color(0, 0, 0).rgba_web(),
'style': 'filled',
'shape': 'rect',
},
'edge': {
'fontname': self.font_name,
'fontsize': self.font_size,
'fontcolor': Color(0, 0, 0).rgba_web(),
}
}
def done(self):
source = self.generate()
self.debug(source)
fd, temp_name = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(source)
cmd = '"{0}" -T{1} -o{2} {3}'.format(
self.tool, self.output_type, self.output_file, temp_name
)
self.verbose('Executing: {0}'.format(cmd))
try:
proc = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
stdout, stderr = proc.communicate()
ret = proc.returncode
if ret:
raise PyCallGraphException(
'The command "%(cmd)s" failed with error '
'code %(ret)i.' % locals())
finally:
os.unlink(temp_name)
self.verbose('Generated {0} with {1} nodes.'.format(
self.output_file, len(self.processor.func_count),
))
def generate(self):
'''Returns a string with the contents of a DOT file for Graphviz to
parse.
'''
indent_join = '\n' + ' ' * 12
return textwrap.dedent('''\
digraph G {{
// Attributes
{0}
// Groups
{1}
// Nodes
{2}
// Edges
{3}
}}
'''.format(
indent_join.join(self.generate_attributes()),
indent_join.join(self.generate_groups()),
indent_join.join(self.generate_nodes()),
indent_join.join(self.generate_edges()),
))
def attrs_from_dict(self, d):
output = []
for attr, val in d.iteritems():
output.append('%s = "%s"' % (attr, val))
return ', '.join(output)
def node(self, key, attr):
return '"{0}" [{1}];'.format(
key, self.attrs_from_dict(attr),
)
def edge(self, edge, attr):
return '"{0.src_func}" -> "{0.dst_func}" [{1}];'.format(
edge, self.attrs_from_dict(attr),
)
def generate_attributes(self):
output = []
for section, attrs in self.graph_attributes.iteritems():
output.append('{0} [ {1} ];'.format(
section, self.attrs_from_dict(attrs),
))
return output
def generate_groups(self):
if not self.processor.config.groups:
return ''
output = []
for group, nodes in self.processor.groups():
funcs = [node.name for node in nodes]
funcs = '" "'.join(funcs)
group_color = self.group_border_color.rgba_web()
group_font_size = self.group_font_size
output.append(
'subgraph "cluster_{group}" {{ '
'"{funcs}"; '
'label = "{group}"; '
'fontsize = "{group_font_size}"; '
'fontcolor = "black"; '
'style = "bold"; '
'color="{group_color}"; }}'.format(**locals()))
return output
def generate_nodes(self):
output = []
for node in self.processor.nodes():
attr = {
'color': self.node_color_func(node).rgba_web(),
'label': self.node_label_func(node),
}
output.append(self.node(node.name, attr))
return output
def generate_edges(self):
output = []
for edge in self.processor.edges():
attr = {
'color': self.edge_color_func(edge).rgba_web(),
'label': self.edge_label_func(edge),
}
output.append(self.edge(edge, attr))
return output
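# Illustrative customisation (an added sketch): the colour and label hooks
# inherited from Output can be swapped per instance.
#
#   def red_by_time(node):
#       return Color.hsv(0.0, node.time.fraction, 0.9)
#
#   graphviz = GraphvizOutput(output_file='hot.png')
#   graphviz.node_color_func = red_by_time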
| 6,410 | Python | .py | 176 | 25.346591 | 79 | 0.516551 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
816 | pickle.py | gak_pycallgraph/pycallgraph/output/pickle.py | try:
import cPickle as pickle
except ImportError:
import pickle
from .output import Output
class PickleOutput(Output):
def __init__(self, **kwargs):
self.fp = None
self.output_file = 'pycallgraph.dot'
Output.__init__(self, **kwargs)
@classmethod
def add_arguments(cls, subparsers, parent_parser, usage):
defaults = cls()
subparser = subparsers.add_parser(
'pickle',
help='Dump to a cPickle file for generation later',
parents=[parent_parser], usage=usage,
)
subparser.add_argument(
'-o', '--output-file', type=str, default=defaults.output_file,
help='The generated cPickle file',
)
return subparser
def done(self):
self.prepare_output_file()
pickle.dump(self.tracer, self.fp, pickle.HIGHEST_PROTOCOL)
| 882 | Python | .py | 26 | 25.961538 | 74 | 0.622196 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
817 | output.py | gak_pycallgraph/pycallgraph/output/output.py | import re
import os
from distutils.spawn import find_executable
from ..exceptions import PyCallGraphException
from ..color import Color
class Output(object):
'''Base class for all outputters.'''
def __init__(self, **kwargs):
self.node_color_func = self.node_color
self.edge_color_func = self.edge_color
self.node_label_func = self.node_label
self.edge_label_func = self.edge_label
# Update the defaults with anything from kwargs
[setattr(self, k, v) for k, v in kwargs.iteritems()]
def set_config(self, config):
'''
This is a quick hack to move the config variables set in Config into
the output module config variables.
'''
for k, v in config.__dict__.iteritems():
if hasattr(self, k) and \
callable(getattr(self, k)):
continue
setattr(self, k, v)
def node_color(self, node):
value = float(node.time.fraction * 2 + node.calls.fraction) / 3
return Color.hsv(value / 2 + .5, value, 0.9)
def edge_color(self, edge):
value = float(edge.time.fraction * 2 + edge.calls.fraction) / 3
return Color.hsv(value / 2 + .5, value, 0.7)
def node_label(self, node):
parts = [
'{0.name}',
'calls: {0.calls.value:n}',
'time: {0.time.value:f}s',
]
if self.processor.config.memory:
parts += [
'memory in: {0.memory_in.value_human_bibyte}',
'memory out: {0.memory_out.value_human_bibyte}',
]
return r'\n'.join(parts).format(node)
def edge_label(self, edge):
return '{0}'.format(edge.calls.value)
def sanity_check(self):
'''Basic checks for certain libraries or external applications. Raise
or warn if there is a problem.
'''
pass
@classmethod
def add_arguments(cls, subparsers):
pass
def reset(self):
pass
def set_processor(self, processor):
self.processor = processor
def start(self):
'''Initialise variables after initial configuration.'''
pass
def update(self):
'''Called periodically during a trace, but only when should_update is
set to True.
'''
raise NotImplementedError('update')
def should_update(self):
'''Return True if the update method should be called periodically.'''
return False
def done(self):
'''Called when the trace is complete and ready to be saved.'''
raise NotImplementedError('done')
def ensure_binary(self, cmd):
if find_executable(cmd):
return
raise PyCallGraphException(
'The command "{0}" is required to be in your path.'.format(cmd))
def normalize_path(self, path):
regex_user_expand = re.compile(r'\A~')
if regex_user_expand.match(path):
path = os.path.expanduser(path)
else:
path = os.path.expandvars(path) # expand, just in case
return path
def prepare_output_file(self):
if self.fp is None:
self.output_file = self.normalize_path(self.output_file)
self.fp = open(self.output_file, 'wb')
def verbose(self, text):
self.processor.config.log_verbose(text)
def debug(self, text):
self.processor.config.log_debug(text)
@classmethod
def add_output_file(cls, subparser, defaults, help):
subparser.add_argument(
'-o', '--output-file', type=str, default=defaults.output_file,
help=help,
)
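# Minimal custom outputter sketch (an added example; done() is the only hook a
# subclass must implement to produce output):
#
#   class CountOutput(Output):
#       def done(self):
#           for node in self.processor.nodes():
#               print(node.name, node.calls.value)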
| 3,645 | Python | .py | 96 | 29.145833 | 78 | 0.603066 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
818 | gephi.py | gak_pycallgraph/pycallgraph/output/gephi.py | import math
from .output import Output
class GephiOutput(Output):
def __init__(self, **kwargs):
self.fp = None
self.output_file = 'pycallgraph.gdf'
Output.__init__(self, **kwargs)
@classmethod
def add_arguments(cls, subparsers, parent_parser, usage):
defaults = cls()
subparser = subparsers.add_parser(
'gephi', help='Gephi GDF generation',
parents=[parent_parser], usage=usage,
)
cls.add_output_file(
subparser, defaults, 'The generated Gephi GDF file'
)
def generate(self):
'''Returns a string with the contents of a GDF file.'''
return u'\n'.join([
self.generate_nodes(),
self.generate_edges(),
]) + '\n'
def generate_nodes(self):
output = []
fields = u', '.join([
u'name VARCHAR',
u'label VARCHAR',
u'group VARCHAR',
u'calls INTEGER',
u'time DOUBLE',
u'memory_in INTEGER',
u'memory_out INTEGER',
u'color VARCHAR',
u'width DOUBLE',
])
output.append(u'nodedef> {}'.format(fields))
for node in self.processor.nodes():
fields = u','.join([str(a) for a in [
node.name,
node.name,
node.group,
node.calls.value,
node.time.value,
node.memory_in.value,
node.memory_out.value,
u"'{}'".format(self.node_color_func(node).rgb_csv()),
self.node_size(node),
]])
output.append(fields)
return '\n'.join(output)
def node_size(self, node):
return math.log(node.time.fraction * (math.e - 1) + 1) * 2 + 1
def generate_edges(self):
output = []
fields = u', '.join([
u'node1 VARCHAR',
u'node2 VARCHAR',
u'label VARCHAR',
u'labelvisible VARCHAR',
u'directed BOOLEAN',
u'color VARCHAR',
u'width DOUBLE',
])
output.append(u'edgedef> {}'.format(fields))
for edge in self.processor.edges():
fields = u','.join([str(a) for a in [
edge.src_func,
edge.dst_func,
self.edge_label(edge),
'true',
'true',
u"'{}'".format(self.edge_color_func(edge).rgb_csv()),
edge.calls.fraction * 2,
]])
output.append(fields)
return '\n'.join(output)
def done(self):
source = self.generate()
f = open(self.output_file, 'w')
f.write(source)
f.close()
| 2,757 | Python | .py | 82 | 22.207317 | 70 | 0.497554 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
819 | ubigraph.py | gak_pycallgraph/pycallgraph/output/ubigraph.py | try:
from xmlrpclib import Server
except ImportError:
from xmlrpc.client import Server
# from ..exceptions import PyCallGraphException
from .output import Output
class UbigraphOutput(Output):
def __init__(self, **kwargs):
self.fp = None
self.server_url = 'http://127.0.0.1:20738/RPC2'
Output.__init__(self, **kwargs)
def start(self):
server = Server(self.server_url)
self.graph = server.ubigraph
# Create a graph
for i in range(0, 10):
self.graph.new_vertex_w_id(i)
# Make some edges
for i in range(0, 10):
self.graph.new_edge(i, (i + 1) % 10)
def should_update(self):
return True
def update(self):
pass
@classmethod
def add_arguments(cls, subparsers, parent_parser, usage):
defaults = cls()
subparser = subparsers.add_parser(
'ubigraph',
help='Update an Ubigraph visualization in real time',
parents=[parent_parser], usage=usage,
)
subparser.add_argument(
'-s', '--server-url', type=str, default=defaults.server_url,
help='The Ubigraph server',
)
return subparser
def done(self):
pass
| 1,265 | Python | .py | 39 | 24.282051 | 72 | 0.601156 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
820 | __init__.py | gak_pycallgraph/pycallgraph/output/__init__.py | import collections
from .output import Output
from .graphviz import GraphvizOutput
from .gephi import GephiOutput
from .ubigraph import UbigraphOutput
from .pickle import PickleOutput
outputters = collections.OrderedDict([
('graphviz', GraphvizOutput),
('gephi', GephiOutput),
# ('ubigraph', UbigraphOutput),
])
| 327 | Python | .py | 11 | 27.363636 | 38 | 0.795527 | gak/pycallgraph | 1,816 | 335 | 61 | GPL-2.0 | 9/5/2024, 5:08:15 PM (Europe/Amsterdam) |
821 | HitPolicyTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/HitPolicyTest.py | import os
import unittest
from SpiffWorkflow.bpmn.serializer.helpers.dictionary import DictionaryConverter
from SpiffWorkflow.dmn.serializer.task_spec import BaseBusinessRuleTaskConverter
from SpiffWorkflow.camunda.specs import BusinessRuleTask
from .python_engine.PythonDecisionRunner import PythonDecisionRunner
class HitPolicyTest(unittest.TestCase):
def testHitPolicyUnique(self):
file_name = os.path.join(os.path.dirname(__file__), 'data', 'unique_hit.dmn')
runner = PythonDecisionRunner(file_name)
decision_table = runner.decision_table
self.assertEqual('UNIQUE', decision_table.hit_policy)
res = runner.result({'name': 'Larry'})
self.assertEqual(1, res['result'])
def testHitPolicyCollect(self):
file_name = os.path.join(os.path.dirname(__file__), 'data', 'collect_hit.dmn')
runner = PythonDecisionRunner(file_name)
decision_table = runner.decision_table
self.assertEqual('COLLECT', decision_table.hit_policy)
res = runner.result({'type': 'stooge'})
self.assertEqual(4, len(res['name']))
res = runner.result({'type': 'farmer'})
self.assertEqual(1, len(res['name']))
self.assertEqual('Elmer Fudd', res['name'][0])
def testSerializeHitPolicy(self):
file_name = os.path.join(os.path.dirname(__file__), 'data', 'collect_hit.dmn')
runner = PythonDecisionRunner(file_name)
decision_table = runner.decision_table
self.assertEqual("COLLECT", decision_table.hit_policy)
converter = BaseBusinessRuleTaskConverter(BusinessRuleTask, DictionaryConverter())
dict = converter.decision_table_to_dict(decision_table)
new_table = converter.decision_table_from_dict(dict)
self.assertEqual("COLLECT", new_table.hit_policy)
| 1,811 | Python | .py | 33 | 47.787879 | 90 | 0.712352 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
822 | ParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/ParserTest.py | import os
import unittest
from .python_engine.PythonDecisionRunner import PythonDecisionRunner
class ParserTest(unittest.TestCase):
def test_input_dash(self):
filename = os.path.join(os.path.dirname(__file__) , 'data', 'input_dash.dmn')
runner = PythonDecisionRunner(filename)
result = runner.result({'a': ''})
self.assertDictEqual(result, {'b': 'anything goes'})
| 402 | Python | .py | 9 | 39.444444 | 85 | 0.70844 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
823 | VersionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/VersionTest.py | import unittest
import os
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
data_dir = os.path.join(os.path.dirname(__file__), 'data', 'dmn_version_test')
class DmnVersionTest(unittest.TestCase):
def setUp(self):
self.parser = BpmnDmnParser()
self.parser.namespaces.update({'dmn': 'https://www.omg.org/spec/DMN/20191111/MODEL/'})
def test_load_v1_0(self):
filename = os.path.join(data_dir, 'dmn_version_20151101_test.dmn')
self.parser.add_dmn_file(filename)
def test_load_v1_1(self):
filename = os.path.join(data_dir, 'dmn_version_20191111_test.dmn')
self.parser.add_dmn_file(filename)
def test_load_v1_2_supported(self):
self._assert_parse_all_pass('v1_2_supported')
def test_load_v1_2_unsupported(self):
self._assert_parse_all_fail('v1_2_unsupported')
def test_load_v1_3_supported(self):
self._assert_parse_all_pass('v1_3_supported')
def test_load_v1_3_unsupported(self):
self._assert_parse_all_fail('v1_3_unsupported')
def _assert_parse_all_pass(self, dir_path):
dirname = os.path.join(data_dir, dir_path)
self.parser.add_dmn_files_by_glob(f'{dirname}/*.dmn')
for parser in self.parser.dmn_parsers.values():
parser.parse()
self.assertIsNotNone(parser.bpmn_id)
self.assertIsNotNone(parser.get_name())
def _assert_parse_all_fail(self, dir_path):
dirname = os.path.join(data_dir, dir_path)
with self.assertRaises(IndexError):
self.parser.add_dmn_files_by_glob(f'{dirname}/*.dmn')
| 1,611 | Python | .py | 33 | 41.272727 | 94 | 0.671775 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
824 | DecisionRunner.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/DecisionRunner.py | import os
from lxml import etree
from SpiffWorkflow.dmn.engine.DMNEngine import DMNEngine
from SpiffWorkflow.dmn.parser.DMNParser import DMNParser
from SpiffWorkflow.bpmn.parser.node_parser import DEFAULT_NSMAP
class WorkflowSpec:
def __init__(self):
self.file = 'my_mock_file'
self.name = 'Mock Workflow Spec'
self.task_specs = {}
class Workflow:
def __init__(self, script_engine):
self.script_engine = script_engine
self.parent = None
self.spec = WorkflowSpec()
self.top_workflow = self
class TaskSpec:
def __init__(self):
self.name = "MockTestSpec"
self.bpmn_name = "Mock Test Spec"
class Task:
def __init__(self, script_engine, data):
self.data = data
self.workflow = Workflow(script_engine)
self.task_spec = TaskSpec()
class DecisionRunner:
def __init__(self, script_engine, filename, path=''):
self.script_engine = script_engine
fn = os.path.join(os.path.dirname(__file__), path, 'data', filename)
with open(fn) as fh:
node = etree.parse(fh)
nsmap = DEFAULT_NSMAP.copy()
nsmap.update(node.getroot().nsmap)
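        # lxml keys the default namespace as None, which XPath lookups cannot
        # use, so re-key it as 'dmn'.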
if None in nsmap:
nsmap['dmn'] = nsmap.pop(None)
self.dmnParser = DMNParser(None, node.getroot(), nsmap)
self.dmnParser.parse()
decision = self.dmnParser.decision
assert len(decision.decisionTables) == 1, \
'Exactly one decision table should exist! (%s)' \
% (len(decision.decisionTables))
self.decision_table = decision.decisionTables[0]
self.dmnEngine = DMNEngine(self.decision_table)
def decide(self, context):
"""Makes the rather ugly assumption that there is only one
rule match for a decision - which was previously the case"""
if not isinstance(context, dict):
context = {'input': context}
task = Task(self.script_engine, context)
return self.dmnEngine.decide(task)[0]
def result(self, context):
task = Task(self.script_engine, context)
return self.dmnEngine.result(task)
| 2,143 | Python | .py | 53 | 32.792453 | 76 | 0.648675 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
825 | FeelDictDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelDictDecisionTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelDictDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('dict_decision_feel.dmn')
def test_string_decision_string_output1(self):
data = {"allergies": {
"PEANUTS": {"delicious": True},
"SPAM": {"delicious": False}
}}
res = self.runner.decide(data)
self.assertEqual(res.description, 'They are allergic to peanuts')
def test_string_decision_string_output2(self):
data = {"allergies": {
"SpAm": {"delicious": False},
"SPAM": {"delicious": False}
}}
res = self.runner.decide(data)
self.assertEqual(res.description, 'They are not allergic to peanuts')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelDictDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,112 | Python | .py | 27 | 32.888889 | 81 | 0.637546 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
826 | FeelLongDoubleComparisonTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelLongDoubleComparisonTest.py | import unittest
from decimal import Decimal
from .FeelDecisionRunner import FeelDecisionRunner
class FeelLongOrDoubleDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('long_or_double_decision_comparison_feel.dmn')
def test_long_or_double_decision_string_output1(self):
res = self.runner.decide({"Age":Decimal('30.5')})
self.assertEqual(res.description, '30.5 Row Annotation')
    def test_long_or_double_decision_string_output2(self):
res = self.runner.decide({"Age":Decimal('25.3')})
self.assertEqual(res.description, 'L Row Annotation')
def test_long_or_double_decision_string_output3(self):
res = self.runner.decide({"Age":Decimal('25.4')})
self.assertEqual(res.description, 'H Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelLongOrDoubleDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,103 | Python | .py | 23 | 42.347826 | 89 | 0.72243 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
827 | FeelBoolDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelBoolDecisionTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelBoolDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('bool_decision_feel.dmn')
def test_bool_decision_string_output1(self):
res = self.runner.decide(True)
self.assertEqual(res.description, 'Y Row Annotation')
def test_bool_decision_string_output2(self):
res = self.runner.decide(False)
self.assertEqual(res.description, 'N Row Annotation')
def test_bool_decision_string_output3(self):
res = self.runner.decide(None)
self.assertEqual(res.description, 'ELSE Row Annotation')
| 772 | Python | .py | 18 | 36.611111 | 67 | 0.718876 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
828 | FeelStringIntegerDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelStringIntegerDecisionTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelStringIntegerDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('string_integer_decision_feel.dmn')
def test_string_integer_decision_string_output1(self):
res = self.runner.decide({"Gender":'m', "Age": 30})
self.assertEqual(res.description, 'm30 Row Annotation')
def test_string_integer_decision_string_output2(self):
res = self.runner.decide({"Gender":'m', "Age": 24})
self.assertEqual(res.description, 'mL Row Annotation')
def test_string_integer_decision_string_output3(self):
res = self.runner.decide({"Gender":'m', "Age": 25})
self.assertEqual(res.description, 'mH Row Annotation')
def test_string_integer_decision_string_output4(self):
res = self.runner.decide({"Gender":'f', "Age": -1})
self.assertEqual(res.description, 'fL Row Annotation')
def test_string_integer_decision_string_output5(self):
res = self.runner.decide({"Gender":'x', "Age": 0})
self.assertEqual(res.description, 'ELSE Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelStringIntegerDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,438 | Python | .py | 28 | 45.107143 | 90 | 0.702645 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
829 | FeelListDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelListDecisionTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelListDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('list_decision_feel.dmn')
def test_string_decision_string_output1(self):
res = self.runner.decide({'allergies':["PEANUTS", "SPAM"]})
self.assertEqual(res.description, 'They are allergic to peanuts')
def test_string_decision_string_output2(self):
res = self.runner.decide({'allergies':["SPAM", "SPAM"]})
self.assertEqual(res.description, 'They are not allergic to peanuts')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelListDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 882 | Python | .py | 19 | 41 | 81 | 0.719298 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
830 | FeelNearMissNameTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelNearMissNameTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelNearMissTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.data = {
"Exclusive": [
{
"ExclusiveSpaceRoomID": "121",
}
],
"eXclusive": [
{
"ExclusiveSpaceRoomID": "121",
}
],
"EXCLUSIVE": [
{
"ExclusiveSpaceRoomID": "121",
}
],
"personnel": [
{
"PersonnelType": "Faculty",
"label": "Steven K Funkhouser (sf4d)",
"value": "sf4d"
}
],
"shared": []
}
cls.runner = FeelDecisionRunner('exclusive_feel.dmn')
def test_string_decision_string_output1(self):
self.assertRaisesRegex(Exception,
                               r".+\['Exclusive', 'eXclusive', 'EXCLUSIVE'\].+",
self.runner.decide,
self.data)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelNearMissTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,431 | Python | .py | 43 | 19.744186 | 79 | 0.461538 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
831 | FeelDecisionRunner.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelDecisionRunner.py | from SpiffWorkflow.bpmn.script_engine.feel_engine import FeelLikeScriptEngine
from ..DecisionRunner import DecisionRunner
class FeelDecisionRunner(DecisionRunner):
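    # Runs decisions with the FEEL-like script engine against the DMN files
    # in the 'feel_engine' data directory.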
def __init__(self, filename):
super().__init__(FeelLikeScriptEngine(), filename, 'feel_engine')
| 275 | Python | .py | 5 | 51 | 77 | 0.786517 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
832 | FeelDateDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelDateDecisionTest.py | import unittest
from datetime import datetime
from SpiffWorkflow.dmn.parser.DMNParser import DMNParser
from .FeelDecisionRunner import FeelDecisionRunner
class FeelDateDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('date_decision_feel.dmn')
def test_date_decision_string_output1(self):
res = self.runner.decide(datetime.strptime('2017-11-01T10:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '111 Row Annotation')
def test_date_decision_string_output2(self):
res = self.runner.decide(datetime.strptime('2017-11-03T00:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '311 Row Annotation')
def test_date_decision_string_output3(self):
res = self.runner.decide(datetime.strptime('2017-11-02T00:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '<3.11 Row Annotation')
def test_date_decision_string_output4(self):
res = self.runner.decide(datetime.strptime('2017-11-04T00:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '>3.11 Row Annotation')
def test_date_decision_string_output5(self):
res = self.runner.decide(datetime.strptime('2017-11-13T12:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '>13.11<14.11 Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelDateDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,644 | Python | .py | 30 | 48.866667 | 95 | 0.734082 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
833 | FeelStringDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelStringDecisionTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelStringDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('string_decision_feel.dmn')
def test_string_decision_string_output1(self):
res = self.runner.decide({"Gender":'m'})
self.assertEqual(res.description, 'm Row Annotation')
def test_string_decision_string_output2(self):
res = self.runner.decide({"Gender":'f'})
self.assertEqual(res.description, 'f Row Annotation')
def test_string_decision_string_output3(self):
res = self.runner.decide({"Gender":'y'})
self.assertEqual(res.description, 'NOT x Row Annotation')
def test_string_decision_string_output4(self):
res = self.runner.decide({"Gender":'x'})
self.assertEqual(res.description, 'ELSE Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelStringDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,158 | Python | .py | 25 | 40.28 | 83 | 0.707925 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
834 | FeelIntegerDecisionRangeTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionRangeTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelIntegerDecisionRangeTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
def test_integer_decision_string_output_inclusive(self):
runner = FeelDecisionRunner('integer_decision_range_inclusive_feel.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, '100-110 Inclusive Annotation')
res = runner.decide({"Age":99})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, '100-110 Inclusive Annotation')
res = runner.decide({"Age":111})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_integer_decision_string_output_exclusive(self):
runner = FeelDecisionRunner('integer_decision_range_exclusive_feel.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":101})
self.assertEqual(res.description, '100-110 Exclusive Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":109})
self.assertEqual(res.description, '100-110 Exclusive Annotation')
def test_integer_decision_string_output_excl_inclusive(self):
runner = FeelDecisionRunner('integer_decision_range_excl_inclusive_feel.dmn')
res = runner.decide({'Age': 100})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({'Age':101})
self.assertEqual(res.description, '100-110 ExclInclusive Annotation')
res = runner.decide({'Age':110})
self.assertEqual(res.description, '100-110 ExclInclusive Annotation')
res = runner.decide({'Age':111})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_integer_decision_string_output_incl_exclusive(self):
runner = FeelDecisionRunner('integer_decision_range_incl_exclusive_feel.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, '100-110 InclExclusive Annotation')
res = runner.decide({"Age":99})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":109})
self.assertEqual(res.description, '100-110 InclExclusive Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelIntegerDecisionRangeTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 2,790 | Python | .py | 50 | 47.84 | 89 | 0.696613 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
835 | FeelIntegerDecisionComparisonTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelIntegerDecisionComparisonTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelIntegerDecisionComparisonTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('integer_decision_comparison_feel.dmn')
def test_integer_decision_string_output1(self):
res = self.runner.decide(30)
self.assertEqual(res.description, '30 Row Annotation')
def test_integer_decision_string_output2(self):
res = self.runner.decide(24)
self.assertEqual(res.description, 'L Row Annotation')
def test_integer_decision_string_output3(self):
res = self.runner.decide(25)
self.assertEqual(res.description, 'H Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelIntegerDecisionComparisonTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 990 | Python | .py | 22 | 39.227273 | 94 | 0.734098 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
836 | FeelKwargsParameterTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelKwargsParameterTest.py | import unittest
from .FeelDecisionRunner import FeelDecisionRunner
class FeelStringDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = FeelDecisionRunner('kwargs_parameter_feel.dmn')
def test_string_decision_string_output1(self):
res = self.runner.decide({"Gender":'m'})
self.assertEqual(res.description, 'm Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelStringDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 663 | Python | .py | 16 | 36.5 | 83 | 0.732813 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
837 | FeelLongOrDoubleRangeTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/feel_engine/FeelLongOrDoubleRangeTest.py | import unittest
from decimal import Decimal
from .FeelDecisionRunner import FeelDecisionRunner
class FeelLongOrDoubleDecisionRangeTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
def test_long_or_double_decision_string_output_inclusive(self):
runner = FeelDecisionRunner('long_or_double_decision_range_inclusive_feel.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation')
res = runner.decide({"Age":Decimal('99')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation')
res = runner.decide({"Age":Decimal('111')})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_long_or_double_decision_string_output_exclusive(self):
runner = FeelDecisionRunner('long_or_double_decision_range_exclusive_feel.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('101')})
self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('109')})
self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation')
def test_long_or_double_decision_string_output_excl_inclusive(self):
runner = FeelDecisionRunner('long_or_double_decision_range_excl_inclusive_feel.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('101')})
self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation')
res = runner.decide({"Age":Decimal('111')})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_long_or_double_decision_string_output_incl_exclusive(self):
runner = FeelDecisionRunner('long_or_double_decision_range_incl_exclusive_feel.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation')
res = runner.decide({"Age":Decimal('99')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('109')})
self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FeelLongOrDoubleDecisionRangeTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 3,133 | Python | .py | 51 | 53.568627 | 94 | 0.695681 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
838 | IntegerDecisionComparisonTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionComparisonTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class IntegerDecisionComparisonTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('integer_decision_comparison.dmn')
def test_integer_decision_string_output1(self):
res = self.runner.decide({"Age":30})
self.assertEqual(res.description, '30 Row Annotation')
def test_integer_decision_string_output2(self):
res = self.runner.decide({"Age":24})
self.assertEqual(res.description, 'L Row Annotation')
def test_integer_decision_string_output3(self):
res = self.runner.decide({"Age":25})
self.assertEqual(res.description, 'H Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(IntegerDecisionComparisonTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,007 | Python | .py | 22 | 40 | 90 | 0.724385 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
839 | DateDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/DateDecisionTest.py | import unittest
from datetime import datetime
from SpiffWorkflow.dmn.parser.DMNParser import DMNParser
from .PythonDecisionRunner import PythonDecisionRunner
class DateDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('date_decision.dmn')
def test_date_decision_string_output1(self):
res = self.runner.decide(datetime.strptime('2017-11-01T10:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '111 Row Annotation')
def test_date_decision_string_output2(self):
res = self.runner.decide(datetime.strptime('2017-11-03T00:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '311 Row Annotation')
def test_date_decision_string_output3(self):
res = self.runner.decide(datetime.strptime('2017-11-02T00:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '<3.11 Row Annotation')
def test_date_decision_string_output4(self):
res = self.runner.decide(datetime.strptime('2017-11-04T00:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '>3.11 Row Annotation')
def test_date_decision_string_output5(self):
res = self.runner.decide(datetime.strptime('2017-11-13T12:00:00', DMNParser.DT_FORMAT))
self.assertEqual(res.description, '>13.11<14.11 Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(DateDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,636 | Python | .py | 30 | 48.633333 | 95 | 0.733542 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
840 | NearMissNameTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/NearMissNameTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class NearMissTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.data = {
"Exclusive": [
{
"ExclusiveSpaceRoomID": "121",
}
],
"eXclusive": [
{
"ExclusiveSpaceRoomID": "121",
}
],
"EXCLUSIVE": [
{
"ExclusiveSpaceRoomID": "121",
}
],
"personnel": [
{
"PersonnelType": "Faculty",
"label": "Steven K Funkhouser (sf4d)",
"value": "sf4d"
}
],
"shared": []
}
cls.runner = PythonDecisionRunner('exclusive.dmn')
def test_string_decision_string_output1(self):
self.assertRaisesRegex(Exception,
                               r".+\['Exclusive', 'eXclusive', 'EXCLUSIVE'\].+",
self.runner.decide,
self.data)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(NearMissTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,424 | Python | .py | 43 | 19.581395 | 79 | 0.459519 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
841 | BoolDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/BoolDecisionTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class BoolDecisionTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('bool_decision.dmn')
def test_bool_decision_string_output1(self):
res = self.runner.decide({'input': True})
self.assertEqual(res.description, 'Y Row Annotation')
def test_bool_decision_string_output2(self):
res = self.runner.decide({'input': False})
self.assertEqual(res.description, 'N Row Annotation')
def test_bool_decision_string_output3(self):
res = self.runner.decide(None)
self.assertEqual(res.description, 'ELSE Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(BoolDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 881 | Python | .py | 19 | 40.473684 | 77 | 0.728019 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
842 | ListDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/ListDecisionTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class ListDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('list_decision.dmn')
def test_string_decision_string_output1(self):
res = self.runner.decide({'allergies':["PEANUTS", "SPAM"]})
self.assertEqual(res.description, 'They are allergic to peanuts')
def test_string_decision_string_output2(self):
res = self.runner.decide({'allergies':["SPAM", "SPAM"]})
self.assertEqual(res.description, 'They are not allergic to peanuts')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ListDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 876 | Python | .py | 19 | 40.631579 | 77 | 0.71816 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
843 | IntegerDecisionRangeTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/IntegerDecisionRangeTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class IntegerDecisionRangeTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
def test_integer_decision_string_output_inclusive(self):
runner = PythonDecisionRunner('integer_decision_range_inclusive.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, '100-110 Inclusive Annotation')
res = runner.decide({"Age":99})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, '100-110 Inclusive Annotation')
res = runner.decide({"Age":111})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_integer_decision_string_output_exclusive(self):
runner = PythonDecisionRunner('integer_decision_range_exclusive.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":101})
self.assertEqual(res.description, '100-110 Exclusive Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":109})
self.assertEqual(res.description, '100-110 Exclusive Annotation')
def test_integer_decision_string_output_excl_inclusive(self):
runner = PythonDecisionRunner('integer_decision_range_excl_inclusive.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":101})
self.assertEqual(res.description, '100-110 ExclInclusive Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, '100-110 ExclInclusive Annotation')
res = runner.decide({"Age":111})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_integer_decision_string_output_incl_exclusive(self):
runner = PythonDecisionRunner('integer_decision_range_incl_exclusive.dmn')
res = runner.decide({"Age":100})
self.assertEqual(res.description, '100-110 InclExclusive Annotation')
res = runner.decide({"Age":99})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":110})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":109})
self.assertEqual(res.description, '100-110 InclExclusive Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(IntegerDecisionRangeTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 2,774 | Python | .py | 50 | 47.5 | 85 | 0.696554 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
844 | LongDoubleComparisonTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/LongDoubleComparisonTest.py | import unittest
from decimal import Decimal
from .PythonDecisionRunner import PythonDecisionRunner
class LongOrDoubleDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('long_or_double_decision_comparison.dmn')
def test_long_or_double_decision_string_output1(self):
res = self.runner.decide({"Age":Decimal('30.5')})
self.assertEqual(res.description, '30.5 Row Annotation')
def test_long_or_double_decision_string_output2(self):
res = self.runner.decide({"Age":Decimal('25.3')})
self.assertEqual(res.description, 'L Row Annotation')
def test_long_or_double_decision_string_output3(self):
res = self.runner.decide({"Age":Decimal('25.4')})
self.assertEqual(res.description, 'H Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(LongOrDoubleDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,095 | Python | .py | 23 | 42 | 85 | 0.721281 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
845 | LongOrDoubleRangeTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/LongOrDoubleRangeTest.py | import unittest
from decimal import Decimal
from .PythonDecisionRunner import PythonDecisionRunner
class LongOrDoubleDecisionRangeTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
def test_long_or_double_decision_string_output_inclusive(self):
runner = PythonDecisionRunner('long_or_double_decision_range_inclusive.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation')
res = runner.decide({"Age":Decimal('99')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation')
res = runner.decide({"Age":Decimal('111')})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_long_or_double_decision_string_output_exclusive(self):
runner = PythonDecisionRunner('long_or_double_decision_range_exclusive.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('101')})
self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('109')})
self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation')
def test_long_or_double_decision_string_output_excl_inclusive(self):
runner = PythonDecisionRunner('long_or_double_decision_range_excl_inclusive.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('101')})
self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation')
res = runner.decide({"Age":Decimal('111')})
self.assertEqual(res.description, 'ELSE Row Annotation')
def test_long_or_double_decision_string_output_incl_exclusive(self):
runner = PythonDecisionRunner('long_or_double_decision_range_incl_exclusive.dmn')
res = runner.decide({"Age":Decimal('100.05')})
self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation')
res = runner.decide({"Age":Decimal('99')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('110.05')})
self.assertEqual(res.description, 'ELSE Row Annotation')
res = runner.decide({"Age":Decimal('109')})
self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(LongOrDoubleDecisionRangeTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 3,117 | Python | .py | 51 | 53.254902 | 90 | 0.695395 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
846 | StringDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/StringDecisionTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class StringDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('string_decision.dmn')
def test_string_decision_string_output1(self):
res = self.runner.decide({"Gender":'m'})
self.assertEqual(res.description, 'm Row Annotation')
def test_string_decision_string_output2(self):
res = self.runner.decide({"Gender":'f'})
self.assertEqual(res.description, 'f Row Annotation')
def test_string_decision_string_output3(self):
res = self.runner.decide({"Gender":'y'})
self.assertEqual(res.description, 'NOT x Row Annotation')
def test_string_decision_string_output4(self):
res = self.runner.decide({"Gender":'x'})
self.assertEqual(res.description, 'ELSE Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(StringDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,151 | Python | .py | 25 | 40 | 79 | 0.706989 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
847 | StringIntegerDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/StringIntegerDecisionTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class StringIntegerDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('string_integer_decision.dmn')
def test_string_integer_decision_string_output1(self):
res = self.runner.decide({"Gender":'m', "Age":30})
self.assertEqual(res.description, 'm30 Row Annotation')
def test_string_integer_decision_string_output2(self):
res = self.runner.decide({"Gender":'m', "Age":24})
self.assertEqual(res.description, 'mL Row Annotation')
def test_string_integer_decision_string_output3(self):
res = self.runner.decide({"Gender":'m', "Age":25})
self.assertEqual(res.description, 'mH Row Annotation')
def test_string_integer_decision_string_output4(self):
res = self.runner.decide({"Gender":'f', "Age":-1})
self.assertEqual(res.description, 'fL Row Annotation')
def test_string_integer_decision_string_output5(self):
res = self.runner.decide({"Gender":'x', "Age":0})
self.assertEqual(res.description, 'ELSE Row Annotation')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(StringIntegerDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,426 | Python | .py | 28 | 44.678571 | 86 | 0.704398 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
848 | PythonDecisionRunner.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/PythonDecisionRunner.py | import datetime
from decimal import Decimal
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from ..DecisionRunner import DecisionRunner
class PythonDecisionRunner(DecisionRunner):
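    # Exposes Decimal and datetime to the script environment so the DMN test
    # tables can use them in their expressions.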
def __init__(self, filename):
environment = TaskDataEnvironment({'Decimal': Decimal, 'datetime': datetime})
super().__init__(PythonScriptEngine(environment=environment), filename, 'python_engine')
| 438 | Python | .py | 8 | 50.75 | 96 | 0.795775 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
849 | InvalidBusinessRuleNameErrorTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/InvalidBusinessRuleNameErrorTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class InvalidBusinessRuleNameErrorTest(unittest.TestCase):
def test_integer_decision_string_output_inclusive(self):
runner = PythonDecisionRunner('invalid_decision_name_error.dmn')
try:
runner.decide({'spam': 1})
except Exception as e:
self.assertRegex(str(e), "Did you mean 'spam'")
def suite():
return unittest.TestLoader().loadTestsFromTestCase(InvalidBusinessRuleNameErrorTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 596 | Python | .py | 13 | 39.769231 | 88 | 0.733102 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
850 | DictDecisionTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/dmn/python_engine/DictDecisionTest.py | import unittest
from .PythonDecisionRunner import PythonDecisionRunner
class DictDecisionTestClass(unittest.TestCase):
"""
Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/
"""
@classmethod
def setUpClass(cls):
cls.runner = PythonDecisionRunner('dict_decision.dmn')
def test_string_decision_string_output1(self):
data = {"allergies": {
"PEANUTS": {"delicious": True},
"SPAM": {"delicious": False}
}}
res = self.runner.decide(data)
self.assertEqual(res.description, 'They are allergic to peanuts')
def test_string_decision_string_output2(self):
data = {"allergies": {
"SpAm": {"delicious": False},
"SPAM": {"delicious": False}
}}
res = self.runner.decide(data)
self.assertEqual(res.description, 'They are not allergic to peanuts')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(DictDecisionTestClass)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,105 | Python | .py | 27 | 32.62963 | 77 | 0.636109 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
851 | pattern_base.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/pattern_base.py | import os
import time
import warnings
from lxml import etree
from SpiffWorkflow import Workflow
from SpiffWorkflow.task import Task
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.serializer.prettyxml import XmlSerializer as PrettyXmlSerializer
from SpiffWorkflow.serializer.xml import XmlSerializer
from SpiffWorkflow.serializer.dict import DictionarySerializer
from SpiffWorkflow.serializer.json import JSONSerializer
from SpiffWorkflow.serializer.exceptions import TaskNotSupportedError
from .util import track_workflow
data_dir = os.path.join(os.path.dirname(__file__), 'data')
xml_serializer = XmlSerializer()
dict_serializer = DictionarySerializer()
json_serializer = JSONSerializer()
class WorkflowPatternTestCase:
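    # Mixin for the workflow pattern tests: concrete cases combine it with
    # unittest.TestCase (it calls assert* methods but does not inherit from
    # TestCase itself).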
def init_thread_pool(self):
Task.id_pool = 0
Task.thread_id_pool = 0
def load_from_xml(self, pattern):
self.init_thread_pool()
prefix = os.path.join(data_dir, pattern)
filename = f'{prefix}.xml'
with open(filename) as fp:
xml = etree.parse(fp).getroot()
# This "serializer" is a parser; it doesn't deserialize.
# Because we use it to load all the workflows, consider it tested here.
serializer = PrettyXmlSerializer()
self.spec = WorkflowSpec.deserialize(serializer, xml, filename=filename)
path_file = f'{prefix}.path'
if os.path.exists(path_file):
with open(path_file) as fp:
self.expected_path = fp.read()
else:
self.expected_path = None
data_file = f'{prefix}.data'
if os.path.exists(data_file):
with open(data_file) as fp:
self.expected_data = fp.read()
else:
self.expected_data = None
self.taken_path = track_workflow(self.spec)
self.workflow = Workflow(self.spec)
def serialize(self, spec_or_workflow, serializer):
try:
before = spec_or_workflow.serialize(serializer)
restored = spec_or_workflow.deserialize(serializer, before)
after = restored.serialize(serializer)
return before, after
except TaskNotSupportedError as exc:
warnings.warn(f'Unsupported task spec: {exc}')
return None, None
def run_workflow(self):
        # Allow the workflow up to 5 seconds to complete, to accommodate long-running tasks.
for i in range(10):
self.workflow.run_all(False)
if self.workflow.is_completed():
break
time.sleep(0.5)
def test_run_workflow(self):
self.run_workflow()
self.assertTrue(self.workflow.is_completed())
# Check whether the correct route was taken.
if self.expected_path is not None:
taken_path = '\n'.join(self.taken_path) + '\n'
self.assertEqual(taken_path, self.expected_path)
        # Check data availability.
if self.expected_data is not None:
result = self.workflow.get_data('data', '')
self.assertIn(result, self.expected_data)
def test_xml_serializer(self):
def prepare_result(item):
return etree.tostring(item, pretty_print=True)
before, after = self.serialize(self.spec, xml_serializer)
self.assertEqual(prepare_result(before), prepare_result(after))
self.assertIsInstance(before, etree._Element)
before, after = self.serialize(self.workflow, xml_serializer)
if before is not None:
self.assertEqual(prepare_result(before), prepare_result(after))
def test_dictionary_serializer(self):
before, after = self.serialize(self.spec, dict_serializer)
self.assertDictEqual(before, after)
self.assertIsInstance(before, dict)
before, after = self.serialize(self.workflow, dict_serializer)
if before is not None:
self.assertDictEqual(before, after)
def test_json_serializer(self):
before, after = self.serialize(self.spec, json_serializer)
self.assertEqual(before, after)
self.assertIsInstance(before, str)
before, after = self.serialize(self.workflow, json_serializer)
self.assertEqual(before, after)
| 4,273 | Python | .py | 94 | 36.861702 | 119 | 0.676067 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
852 | util.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/util.py | import time
from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import SubWorkflow
def on_ready_cb(workflow, task, taken_path):
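    # Count how many times this task spec has been reached and append a
    # snapshot of the task and spec data to the 'data' string, so tests can
    # assert on both the route taken and the data seen along the way.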
reached_key = "%s_reached" % str(task.task_spec.name)
n_reached = task.get_data(reached_key, 0) + 1
task.set_data(**{reached_key: n_reached,
'two': 2,
'three': 3,
'test_attribute1': 'false',
'test_attribute2': 'true'})
# Collect a list of all data.
atts = []
for key, value in list(task.data.items()):
if key in ['data',
'two',
'three',
'test_attribute1',
'test_attribute2']:
continue
if key.endswith('reached'):
continue
atts.append('='.join((key, str(value))))
    # Collect a list of all task spec properties.
props = []
for key, value in list(task.task_spec.data.items()):
props.append('='.join((key, str(value))))
    # Store the combined lists in the task data.
atts = ';'.join(atts)
props = ';'.join(props)
old = task.get_data('data', '')
data = task.task_spec.name + ': ' + atts + '/' + props + '\n'
task.set_data(data=old + data)
return True
def on_complete_cb(workflow, task, taken_path):
# Record the path.
indent = ' ' * task.depth
taken_path.append('%s%s' % (indent, task.task_spec.name))
# In workflows that load a subworkflow, the newly loaded children
# will not have on_ready_cb() assigned. By using this function, we
# re-assign the function in every step, thus making sure that new
# children also call on_ready_cb().
for child in task.children:
track_task(child.task_spec, taken_path)
return True
def on_update_cb(workflow, task, taken_path):
for child in task.children:
track_task(child.task_spec, taken_path)
return True
def track_task(task_spec, taken_path):
    # Disconnecting before reconnecting looks redundant, but inexplicably
    # these tests break if the callbacks are connected only after checking
    # that they are not already connected.
if task_spec.ready_event.is_connected(on_ready_cb):
task_spec.ready_event.disconnect(on_ready_cb)
task_spec.ready_event.connect(on_ready_cb, taken_path)
if task_spec.completed_event.is_connected(on_complete_cb):
task_spec.completed_event.disconnect(on_complete_cb)
task_spec.completed_event.connect(on_complete_cb, taken_path)
if isinstance(task_spec, SubWorkflow):
if task_spec.update_event.is_connected(on_update_cb):
task_spec.update_event.disconnect(on_update_cb)
task_spec.update_event.connect(on_update_cb, taken_path)
def track_workflow(wf_spec, taken_path=None):
if taken_path is None:
taken_path = []
for name in wf_spec.task_specs:
track_task(wf_spec.task_specs[name], taken_path)
return taken_path
def run_workflow(test, wf_spec, expected_path, expected_data, workflow=None):
# Execute all tasks within the Workflow.
if workflow is None:
taken_path = track_workflow(wf_spec)
workflow = Workflow(wf_spec)
else:
taken_path = track_workflow(workflow.spec)
test.assertFalse(workflow.is_completed())
try:
        # Allow the workflow up to 5 seconds to complete, to accommodate
        # long-running tasks.
for i in range(10):
workflow.run_all(False)
if workflow.is_completed():
break
time.sleep(0.5)
except Exception:
workflow.task_tree.dump()
raise
test.assertTrue(workflow.is_completed())
# Check whether the correct route was taken.
if expected_path is not None:
taken_path = '\n'.join(taken_path) + '\n'
test.assertEqual(taken_path, expected_path)
    # Check data availability.
if expected_data is not None:
result = workflow.get_data('data', '')
test.assertIn(result, expected_data)
return workflow
| 4,045 | Python | .py | 97 | 33.979381 | 97 | 0.63352 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
853 | IteratorTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/IteratorTest.py | import unittest
import os
from datetime import datetime
from lxml import etree
from SpiffWorkflow import TaskState, Workflow
from SpiffWorkflow.specs.Cancel import Cancel
from SpiffWorkflow.specs.Simple import Simple
from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from SpiffWorkflow.serializer.prettyxml import XmlSerializer
data_dir = os.path.join(os.path.dirname(__file__), 'data')
class IterationTest(unittest.TestCase):
def setUp(self):
xml_file = os.path.join(data_dir, 'iteration_test.xml')
with open(xml_file) as fp:
xml = etree.parse(fp).getroot()
wf_spec = WorkflowSpec.deserialize(XmlSerializer(), xml)
self.workflow = Workflow(wf_spec)
def get_tasks_updated_after(self):
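        # Run the start task, record a timestamp, then run all READY tasks so
        # subclasses can filter tasks by updated_ts.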
start = self.workflow.get_next_task(end_at_spec='Start')
start.run()
updated = datetime.now().timestamp()
for task in self.workflow.get_tasks(state=TaskState.READY):
task.run()
return updated
class DepthFirstTest(IterationTest):
def test_get_tasks_updated_after(self):
updated = super().get_tasks_updated_after()
tasks = self.workflow.get_tasks(updated_ts=updated)
self.assertListEqual(
[t.task_spec.name for t in tasks],
['a', 'a1', 'a2', 'c', 'b', 'b1', 'b2']
)
def test_get_tasks_end_at(self):
tasks = self.workflow.get_tasks(end_at_spec='c')
self.assertEqual(
[t.task_spec.name for t in tasks],
['Start', 'a', 'a1', 'last', 'End', 'a2', 'last', 'End', 'c', 'b', 'b1', 'last', 'End', 'b2', 'last', 'End']
)
def test_get_tasks_max_depth(self):
tasks = self.workflow.get_tasks(max_depth=2)
self.assertEqual(
[t.task_spec.name for t in tasks],
['Start', 'a', 'a1', 'a2', 'c', 'b', 'b1', 'b2']
)
class BreadthFirstTest(IterationTest):
def test_get_tasks_updated_after(self):
updated = super().get_tasks_updated_after()
tasks = self.workflow.get_tasks(updated_ts=updated, depth_first=False)
self.assertListEqual(
[t.task_spec.name for t in tasks],
['a', 'b', 'a1', 'a2', 'c', 'b1', 'b2']
)
def test_get_tasks_end_at(self):
tasks = self.workflow.get_tasks(end_at_spec='c', depth_first=False)
self.assertEqual(
[t.task_spec.name for t in tasks],
['Start', 'a', 'b', 'a1', 'a2', 'c', 'b1', 'b2', 'last', 'last', 'last', 'last', 'End', 'End', 'End', 'End']
)
def test_get_tasks_max_depth(self):
tasks = self.workflow.get_tasks(max_depth=2, depth_first=False)
self.assertEqual(
[t.task_spec.name for t in tasks],
['Start', 'a', 'b', 'a1', 'a2', 'c', 'b1', 'b2']
)
| 2,796 | Python | .py | 64 | 35.71875 | 120 | 0.605224 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
854 | DeepMergeTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/DeepMergeTest.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from SpiffWorkflow.util.deep_merge import DeepMerge
class DeepMergeTest(TestCase):
def testBasicMerge(self):
"""
        Tests that one dictionary can be merged deeply into another,
        preserving keys from both.
"""
a = {"fruit": {"apples": "tasty"}}
b = {"fruit": {"oranges": "also tasty"}}
c = DeepMerge.merge(a, b)
self.assertEqual({"fruit":
{"apples": "tasty",
"oranges": "also tasty"
}
}, c)
def testOutOfOrderMerge(self):
a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]}
b = {"foods": [{"fruit": {"oranges": "also tasty", "apples": "tasty"}},
{"canned meats": {"spam": "nope."}}]}
c = DeepMerge.merge(a, b)
self.assertEqual({"foods": [
{"fruit":
{"apples": "tasty",
"oranges": "also tasty"
}
},
{"canned meats":
{"spam": "nope."}
}
]}, c)
def testMixOfArrayTypes(self):
a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}},
{"canned_meats":["spam", "more spam"]}]}
b = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}},
{"canned_meats":["wonderful spam", "spam", "more spam"]}]}
c = DeepMerge.merge(a, b)
self.assertEqual({"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}},
{"canned_meats":["spam", "more spam", "wonderful spam"]}]}, c)
def testRemovingItemsFromArrays(self):
a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}},
{"canned_meats":["spam", "more spam"]}]}
b = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]}
c = DeepMerge.merge(a, b)
self.assertEqual({"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]}, c)
| 2,181 | Python | .py | 46 | 34.673913 | 97 | 0.479736 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
855 | PersistSmallWorkflowTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/PersistSmallWorkflowTest.py | import unittest
from SpiffWorkflow import TaskState, Workflow
from SpiffWorkflow.specs import Join, MultiChoice, WorkflowSpec, Simple
from SpiffWorkflow.operators import Attrib, Equal, PathAttrib
from SpiffWorkflow.serializer.dict import DictionarySerializer
class ASmallWorkflow(WorkflowSpec):
def __init__(self):
super(ASmallWorkflow, self).__init__(name="asmallworkflow", addstart=True)
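        # Build: Start -> MultiChoice -> (task_a1 unconditionally, task_a2
        # only if test_attribute1 equals test/attribute2) -> Join -> End.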
multichoice = MultiChoice(self, 'multi_choice_1')
self.start.connect(multichoice)
a1 = Simple(self, 'task_a1')
multichoice.connect(a1)
a2 = Simple(self, 'task_a2')
cond = Equal(Attrib('test_attribute1'), PathAttrib('test/attribute2'))
multichoice.connect_if(cond, a2)
syncmerge = Join(self, 'struct_synch_merge_1', 'multi_choice_1')
a1.connect(syncmerge)
a2.connect(syncmerge)
end = Simple(self, 'End')
syncmerge.connect(end)
class PersistSmallWorkflowTest(unittest.TestCase):
"""Runs persistency tests agains a small and easy to inspect workflowdefinition"""
def setUp(self):
self.wf_spec = ASmallWorkflow()
self.workflow = self._advance_to_a1(self.wf_spec)
def _advance_to_a1(self, wf_spec):
workflow = Workflow(wf_spec)
tasks = workflow.get_tasks(state=TaskState.READY)
task_start = tasks[0]
workflow.run_task_from_id(task_start.id)
tasks = workflow.get_tasks(state=TaskState.READY)
multichoice = tasks[0]
workflow.run_task_from_id(multichoice.id)
tasks = workflow.get_tasks(state=TaskState.READY)
task_a1 = tasks[0]
workflow.run_task_from_id(task_a1.id)
return workflow
def testDictionarySerializer(self):
"""
        Tests the DictionarySerializer for persisting Workflows and Tasks.
"""
old_workflow = self.workflow
serializer = DictionarySerializer()
serialized_workflow = old_workflow.serialize(serializer)
serializer = DictionarySerializer()
new_workflow = Workflow.deserialize(serializer, serialized_workflow)
before = old_workflow.get_dump()
after = new_workflow.get_dump()
self.assertEqual(before, after)
def testDeserialization(self):
"""
        Tests that the deserialized workflow matches the original workflow.
"""
old_workflow = self.workflow
old_workflow.spec.start.set_data(marker=True)
serializer = DictionarySerializer()
serialized_workflow = old_workflow.serialize(serializer)
serializer = DictionarySerializer()
new_workflow = Workflow.deserialize(serializer, serialized_workflow)
self.assertEqual(
len(new_workflow.get_tasks()), len(old_workflow.get_tasks()))
self.assertEqual(new_workflow.spec.start.get_data(
'marker'), old_workflow.spec.start.get_data('marker'))
self.assertEqual(1, len([t for t in new_workflow.get_tasks() if t.task_spec.name == 'Start']))
def testCompleteAfterDeserialization(self):
"""
        Tests that the deserialized workflow can be completed.
"""
old_workflow = self.workflow
old_workflow.run_next()
self.assertEqual('task_a2', old_workflow.last_task.task_spec.name)
serializer = DictionarySerializer()
serialized_workflow = old_workflow.serialize(serializer)
serializer = DictionarySerializer()
new_workflow = Workflow.deserialize(serializer, serialized_workflow)
        self.assertEqual('task_a2', new_workflow.last_task.task_spec.name)
new_workflow.run_all()
self.assertEqual('task_a2', old_workflow.last_task.task_spec.name)
| 3,722 | Python | .py | 78 | 39.358974 | 102 | 0.685738 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
856 | docTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/docTest.py | # -*- coding: utf-8 -*-
import sys
import unittest
import os
dirname = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(dirname, '..', '..', '..'))
doc_dir = os.path.join(dirname, '..', '..', '..', 'doc')
class TutorialTest(object):
"""
Tests the examples that are included in the docs.
"""
tutorial_dir = None
def setUp(self):
os.chdir(self.tutorial_dir)
sys.path.insert(0, self.tutorial_dir)
def tearDown(self):
sys.path.pop(0)
os.chdir(dirname)
def testTutorial(self):
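        # Importing 'start' executes the tutorial script; the workflow it
        # builds should already have run to completion.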
from start import workflow
self.assertTrue(workflow.is_completed())
class Tutorial1Test(TutorialTest, unittest.TestCase):
tutorial_dir = os.path.join(doc_dir, 'core', 'tutorial')
class Tutorial2Test(TutorialTest, unittest.TestCase):
tutorial_dir = os.path.join(doc_dir, 'core', 'custom-tasks')
def suite():
tests = unittest.TestLoader().loadTestsFromTestCase(Tutorial1Test)
tests.addTests(
unittest.defaultTestLoader.loadTestsFromTestCase(Tutorial2Test))
return tests
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,165 | Python | .py | 32 | 31.59375 | 72 | 0.680965 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
857 | ExecuteProcessMock.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/ExecuteProcessMock.py | # -*- coding: utf-8 -*-
import time
def main():
time.sleep(0.5)
print("127.0.0.1")
if __name__ == "__main__":
main()
| 133 | Python | .py | 7 | 15.714286 | 26 | 0.516393 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
858 | WorkflowTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/core/WorkflowTest.py | import unittest
import os
from datetime import datetime
from lxml import etree
from SpiffWorkflow import TaskState, Workflow
from SpiffWorkflow.specs import Cancel, Simple, WorkflowSpec
from SpiffWorkflow.serializer.prettyxml import XmlSerializer
data_dir = os.path.join(os.path.dirname(__file__), 'data')
class WorkflowTest(unittest.TestCase):
def setUp(self):
xml_file = os.path.join(data_dir, 'workflow1.xml')
with open(xml_file) as fp:
xml = etree.parse(fp).getroot()
wf_spec = WorkflowSpec.deserialize(XmlSerializer(), xml)
self.workflow = Workflow(wf_spec)
def test_interactive_calls(self):
"""Simulates interactive calls, as would be issued by a user."""
tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].task_spec.name, 'Start')
self.workflow.run_task_from_id(tasks[0].id)
self.assertEqual(tasks[0].state, TaskState.COMPLETED)
tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(len(tasks), 2)
task_a1 = tasks[0]
task_b1 = tasks[1]
self.assertEqual(task_a1.task_spec.__class__, Simple)
self.assertEqual(task_a1.task_spec.name, 'task_a1')
self.assertEqual(task_b1.task_spec.__class__, Simple)
self.assertEqual(task_b1.task_spec.name, 'task_b1')
self.workflow.run_task_from_id(task_a1.id)
self.assertEqual(task_a1.state, TaskState.COMPLETED)
tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(len(tasks), 2)
self.assertTrue(task_b1 in tasks)
task_a2 = tasks[0]
self.assertEqual(task_a2.task_spec.__class__, Simple)
self.assertEqual(task_a2.task_spec.name, 'task_a2')
self.workflow.run_task_from_id(task_a2.id)
tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(len(tasks), 1)
self.assertTrue(task_b1 in tasks)
self.workflow.run_task_from_id(task_b1.id)
tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(len(tasks), 1)
self.workflow.run_task_from_id(tasks[0].id)
tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].task_spec.name, 'synch_1')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/TaskTest.py
import unittest
import re
from SpiffWorkflow.task import Task, TaskState, TaskIterator
from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from SpiffWorkflow.specs.Simple import Simple
class MockWorkflow(object):
def __init__(self, spec):
self.spec = spec
self.tasks = {}
class TaskTest(unittest.TestCase):
def setUp(self):
Task.id_pool = 0
Task.thread_id_pool = 0
def testTree(self):
# Build a tree.
spec = WorkflowSpec(name='Mock Workflow')
workflow = MockWorkflow(spec)
task1 = Simple(spec, 'Simple 1')
task2 = Simple(spec, 'Simple 2')
task3 = Simple(spec, 'Simple 3')
task4 = Simple(spec, 'Simple 4')
task5 = Simple(spec, 'Simple 5')
task6 = Simple(spec, 'Simple 6')
task7 = Simple(spec, 'Simple 7')
task8 = Simple(spec, 'Simple 8')
task9 = Simple(spec, 'Simple 9')
root = Task(workflow, task1)
c1 = root._add_child(task2)
c11 = c1._add_child(task3)
c111 = c11._add_child(task4)
Task(workflow, task5, c111)
Task(workflow, task6, c11)
Task(workflow, task7, c1)
Task(workflow, task8, root)
c3 = Task(workflow, task9, root)
c3.state = TaskState.COMPLETED
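        # Resulting tree shape (numbers refer to the Simple specs above;
        # Simple 9 is the only COMPLETED task):
        #   1
        #   +- 2
        #   |  +- 3
        #   |  |  +- 4
        #   |  |  |  +- 5
        #   |  |  +- 6
        #   |  +- 7
        #   +- 8
        #   +- 9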
# Check whether the tree is built properly.
expected = """!/0: Task of Simple 1 State: MAYBE Children: 3
!/0: Task of Simple 2 State: MAYBE Children: 2
!/0: Task of Simple 3 State: MAYBE Children: 2
!/0: Task of Simple 4 State: MAYBE Children: 1
!/0: Task of Simple 5 State: MAYBE Children: 0
!/0: Task of Simple 6 State: MAYBE Children: 0
!/0: Task of Simple 7 State: MAYBE Children: 0
!/0: Task of Simple 8 State: MAYBE Children: 0
!/0: Task of Simple 9 State: COMPLETED Children: 0"""
expected = re.compile(expected.replace('!', r'([0-9a-f\-]+)'))
self.assertTrue(expected.match(root.get_dump()),
'Expected:\n' + repr(expected.pattern) + '\n' +
'but got:\n' + repr(root.get_dump()))
# Now remove one line from the expected output for testing the
# filtered iterator.
expected2 = ''
for line in expected.pattern.split('\n'):
if line.find('Simple 9') >= 0:
continue
expected2 += line.lstrip() + '\n'
expected2 = re.compile(expected2)
# Run the iterator test.
result = ''
for thetask in TaskIterator(root, state=TaskState.MAYBE):
result += thetask.get_dump(0, False) + '\n'
self.assertTrue(expected2.match(result),
'Expected:\n' + repr(expected2.pattern) + '\n' +
'but got:\n' + repr(result))

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/DataPatternTest.py
# -*- coding: utf-8 -*-
from unittest import TestCase
from .pattern_base import WorkflowPatternTestCase
class TaskDataTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('data/task_data')
class BlockDataTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('data/block_data')
class TaskToTaskTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('data/task_to_task')
class BlockToSubworkflowTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('data/block_to_subworkflow')
class SubworkflowToBlockTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
        self.load_from_xml('data/subworkflow_to_block')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/ControlFlowPatternTest.py
# -*- coding: utf-8 -*-
from unittest import TestCase
from .pattern_base import WorkflowPatternTestCase
# This combines the old pattern tests with the old serializer tests, creating one test per pattern
# that verifies the tasks in it can be serialized with our serializers and the workflows run with
# the expected output. This format is a little annoying (inheriting from two classes, with the
# actual work being done in the secondary class); however, this is the most concise thing I could
# manage.
#
# There were also a fair number of never-used options in those tests, so the tests in the base
# class are a lot simpler than the ones they replaced.
class SequenceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/sequence')
class ParallelSplitTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/parallel_split')
class SynchronizationTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/synchronization')
class ExclusiveChoiceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/exclusive_choice')
class SimpleMergeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/simple_merge')
class MultiChoiceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/multi_choice')
class StructuredSynchronizingMergeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/structured_synchronizing_merge')
class MultiMergeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/multi_merge')
class StructuredDiscriminatorTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/structured_discriminator')
class BlockingDiscriminatorTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/blocking_discriminator')
class CancellingDiscriminatorTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancelling_discriminator')
class StructuredPartialJoin(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/structured_partial_join')
class BlockingPartialJoin(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/blocking_partial_join')
class CancellingPartialJoin(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancelling_partial_join')
class GeneralizedAndJoin(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/generalized_and_join')
class LocalSynchronizingMergeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/acyclic_synchronizing_merge')
class GeneralSynchronizingMergeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/general_synchronizing_merge')
class ThreadMergeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/thread_merge')
class ThreadSplitTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/thread_split')
class MultiInstanceWithoutSynchronizationTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/multi_instance_without_synch')
class MultiInstanceWithDesignTimeKnowledgeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/multi_instance_with_a_priori_design_time_knowledge')
class MultiInstanceWithRunTimeKnowledgeTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/multi_instance_with_a_priori_run_time_knowledge')
class StaticPartialJoinMultiInstanceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/static_partial_join_for_multi_instance')
class CancellingPartialJoinMultiInstanceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancelling_partial_join_for_multi_instance')
class DynamicPartialJoinMultiInstanceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/dynamic_partial_join_for_multi_instance')
class DeferredChoiceTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/deferred_choice')
class InterleavedParallelRoutingTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/interleaved_parallel_routing')
class MilestoneTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/milestone')
class CriticalSectionTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/critical_section')
class InterleavedRoutingTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/interleaved_routing')
class CancelTaskTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancel_task')
class CancelCaseTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancel_case')
class CancelRegionTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancel_region')
class CancelMultiInstanceTaskTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/cancel_multi_instance_task')
class CompleteMultiInstanceTaskTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/complete_multiple_instance_activity')
class ArbitraryCyclesTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/arbitrary_cycles')
class RecursionTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/recursion')
def test_run_workflow(self):
pass
class ImplicitTerminationTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/implicit_termination')
class ExplicitTerminationTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/explicit_termination')
class TransientTriggerTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/transient_trigger')
class PersistentTriggerTest(TestCase, WorkflowPatternTestCase):
def setUp(self):
self.load_from_xml('control-flow/persistent_trigger')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/data/workflow1.py
# -*- coding: utf-8 -*-
from SpiffWorkflow.specs.ExclusiveChoice import ExclusiveChoice
from SpiffWorkflow.specs.Join import Join
from SpiffWorkflow.specs.MultiChoice import MultiChoice
from SpiffWorkflow.specs.MultiInstance import MultiInstance
from SpiffWorkflow.specs.Simple import Simple
from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from SpiffWorkflow.operators import Attrib, Equal, NotEqual
class TestWorkflowSpec(WorkflowSpec):
def __init__(self):
WorkflowSpec.__init__(self)
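        # The spec built below exercises a broad mix of patterns: two parallel
        # branches joined by a synchronization, exclusive and multi-choice
        # splits, structured synchronizing merges, a discriminator, a loop
        # back to the first exclusive choice, and a multi-instance split.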
# Build one branch.
a1 = Simple(self, 'task_a1')
self.start.connect(a1)
a2 = Simple(self, 'task_a2')
a1.connect(a2)
# Build another branch.
b1 = Simple(self, 'task_b1')
self.start.connect(b1)
b2 = Simple(self, 'task_b2')
b1.connect(b2)
# Merge both branches (synchronized).
synch_1 = Join(self, 'synch_1')
a2.connect(synch_1)
b2.connect(synch_1)
# If-condition that does not match.
excl_choice_1 = ExclusiveChoice(self, 'excl_choice_1')
synch_1.connect(excl_choice_1)
c1 = Simple(self, 'task_c1')
excl_choice_1.connect(c1)
c2 = Simple(self, 'task_c2')
cond = Equal(Attrib('test_attribute1'), Attrib('test_attribute2'))
excl_choice_1.connect_if(cond, c2)
c3 = Simple(self, 'task_c3')
excl_choice_1.connect_if(cond, c3)
# If-condition that matches.
excl_choice_2 = ExclusiveChoice(self, 'excl_choice_2')
c1.connect(excl_choice_2)
c2.connect(excl_choice_2)
c3.connect(excl_choice_2)
d1 = Simple(self, 'task_d1')
excl_choice_2.connect(d1)
d2 = Simple(self, 'task_d2')
excl_choice_2.connect_if(cond, d2)
d3 = Simple(self, 'task_d3')
cond = Equal(Attrib('test_attribute1'), Attrib('test_attribute1'))
excl_choice_2.connect_if(cond, d3)
# If-condition that does not match.
multichoice = MultiChoice(self, 'multi_choice_1')
d1.connect(multichoice)
d2.connect(multichoice)
d3.connect(multichoice)
e1 = Simple(self, 'task_e1')
multichoice.connect_if(cond, e1)
e2 = Simple(self, 'task_e2')
cond = Equal(Attrib('test_attribute1'), Attrib('test_attribute2'))
multichoice.connect_if(cond, e2)
e3 = Simple(self, 'task_e3')
cond = Equal(Attrib('test_attribute2'), Attrib('test_attribute2'))
multichoice.connect_if(cond, e3)
# StructuredSynchronizingMerge
syncmerge = Join(self, 'struct_synch_merge_1', 'multi_choice_1')
e1.connect(syncmerge)
e2.connect(syncmerge)
e3.connect(syncmerge)
# Implicit parallel split.
f1 = Simple(self, 'task_f1')
syncmerge.connect(f1)
f2 = Simple(self, 'task_f2')
syncmerge.connect(f2)
f3 = Simple(self, 'task_f3')
syncmerge.connect(f3)
# Discriminator
discrim_1 = Join(self,
'struct_discriminator_1',
'struct_synch_merge_1',
threshold=1)
f1.connect(discrim_1)
f2.connect(discrim_1)
f3.connect(discrim_1)
# Loop back to the first exclusive choice.
excl_choice_3 = ExclusiveChoice(self, 'excl_choice_3')
discrim_1.connect(excl_choice_3)
cond = NotEqual(Attrib('excl_choice_3_reached'), Attrib('two'))
excl_choice_3.connect_if(cond, excl_choice_1)
# Split into 3 branches, and implicitly split twice in addition.
multi_instance_1 = MultiInstance(self, 'multi_instance_1', times=3)
excl_choice_3.connect(multi_instance_1)
# Parallel tasks.
g1 = Simple(self, 'task_g1')
g2 = Simple(self, 'task_g2')
multi_instance_1.connect(g1)
multi_instance_1.connect(g2)
# StructuredSynchronizingMerge
syncmerge2 = Join(self, 'struct_synch_merge_2', 'multi_instance_1')
g1.connect(syncmerge2)
g2.connect(syncmerge2)
# Add a final task.
last = Simple(self, 'last')
syncmerge2.connect(last)
# Add another final task :-).
end = Simple(self, 'End')
last.connect(end)

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/ExecuteTest.py
import os
import unittest
from SpiffWorkflow import TaskState
from SpiffWorkflow.specs.Execute import Execute
from .TaskSpecTest import TaskSpecTest
from ..util import run_workflow
class ExecuteTest(TaskSpecTest):
def create_instance(self):
if 'testtask' in self.wf_spec.task_specs:
del self.wf_spec.task_specs['testtask']
return Execute(self.wf_spec,
'testtask',
description='foo',
args=self.cmd_args)
def setUp(self):
script_path = os.path.join(os.path.dirname(__file__), '..', 'ExecuteProcessMock.py')
self.cmd_args = ["python", script_path]
TaskSpecTest.setUp(self)
def testConstructor(self):
TaskSpecTest.testConstructor(self)
self.assertEqual(self.spec.args, self.cmd_args)
def testPattern(self):
"""
Tests that we can create a task that executes a shell command
and that the workflow can be called to complete such tasks.
"""
self.wf_spec.start.connect(self.spec)
expected = 'Start\n testtask\n'
workflow = run_workflow(self, self.wf_spec, expected, '')
task = self.get_first_task_from_spec_name(workflow, 'testtask')
self.assertEqual(task.state, TaskState.COMPLETED)
self.assertIn(b'127.0.0.1', task.results[0])

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/TaskSpecTest.py
import unittest
from SpiffWorkflow.specs import Join, Simple, WorkflowSpec
from SpiffWorkflow.exceptions import WorkflowException
from SpiffWorkflow.specs.base import TaskSpec
from SpiffWorkflow.serializer.dict import DictionarySerializer
class TaskSpecTest(unittest.TestCase):
def create_instance(self):
if 'testtask' in self.wf_spec.task_specs:
del self.wf_spec.task_specs['testtask']
return TaskSpec(self.wf_spec, 'testtask', description='foo')
def get_first_task_from_spec_name(self, workflow, spec_name):
return workflow.get_next_task(spec_name=spec_name)
def setUp(self):
self.wf_spec = WorkflowSpec(addstart=True)
self.spec = self.create_instance()
def testConstructor(self):
self.assertEqual(self.spec.name, 'testtask')
self.assertEqual(self.spec.description, 'foo')
self.assertEqual(self.spec.data, {})
self.assertEqual(self.spec.defines, {})
self.assertEqual(self.spec.pre_assign, [])
self.assertEqual(self.spec.post_assign, [])
def testSetData(self):
self.assertEqual(self.spec.get_data('foo'), None)
self.assertEqual(self.spec.get_data('foo', 'bar'), 'bar')
self.spec.set_data(foo='foobar')
self.assertEqual(self.spec.get_data('foo'), 'foobar')
self.assertEqual(self.spec.get_data('foo', 'bar'), 'foobar')
def testGetData(self):
return self.testSetData()
def testConnect(self):
self.assertEqual(self.spec._outputs, [])
self.assertEqual(self.spec._inputs, [])
spec = self.create_instance()
self.spec.connect(spec)
self.assertEqual(self.spec._outputs, [spec.name])
self.assertEqual(spec._inputs, [self.spec.name])
def testTest(self):
# Should fail because the TaskSpec has no id yet.
spec = self.create_instance()
self.assertRaises(WorkflowException, spec.test)
# Should fail because the task has no inputs.
self.spec.id = 1
self.assertRaises(WorkflowException, spec.test)
# Connect another task to make sure that it has an input.
self.spec.connect(spec)
self.assertEqual(spec.test(), None)
def testSerialize(self):
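        # Round-trip the spec through the dictionary serializer.  Specs that
        # don't support serialization are expected to raise
        # NotImplementedError from both serialize() and deserialize().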
serializer = DictionarySerializer()
spec = self.create_instance()
try:
serialized = spec.serialize(serializer)
self.assertIsInstance(serialized, dict)
except NotImplementedError:
self.assertIsInstance(spec, TaskSpec)
self.assertRaises(NotImplementedError,
spec.__class__.deserialize, None, None, None)
return
new_wf_spec = WorkflowSpec()
new_spec = spec.__class__.deserialize(serializer, new_wf_spec,
serialized)
before = spec.serialize(serializer)
after = new_spec.serialize(serializer)
self.assertEqual(before, after, 'Before:\n%s\nAfter:\n%s\n' % (before, after))
def testAncestors(self):
T1 = Simple(self.wf_spec, 'T1')
T2A = Simple(self.wf_spec, 'T2A')
T2B = Simple(self.wf_spec, 'T2B')
M = Join(self.wf_spec, 'M')
T3 = Simple(self.wf_spec, 'T3')
self.wf_spec.start.connect(T1)
T1.connect(T2A)
T1.connect(T2B)
T2A.connect(M)
T2B.connect(M)
M.connect(T3)
self.assertEqual(T1.ancestors(), [self.wf_spec.start])
self.assertEqual(T2A.ancestors(), [T1, self.wf_spec.start])
self.assertEqual(T2B.ancestors(), [T1, self.wf_spec.start])
self.assertEqual(M.ancestors(), [T2A, T1, self.wf_spec.start, T2B])
self.assertEqual(len(T3.ancestors()), 5)
def test_ancestors_cyclic(self):
T1 = Join(self.wf_spec, 'T1')
T2 = Simple(self.wf_spec, 'T2')
self.wf_spec.start.connect(T1)
T1.connect(T2)
self.assertEqual(T1.ancestors(), [self.wf_spec.start])
self.assertEqual(T2.ancestors(), [T1, self.wf_spec.start])

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/TransformTest.py
from ..util import run_workflow
from .TaskSpecTest import TaskSpecTest
from SpiffWorkflow.specs import Transform, Simple
class TransformTest(TaskSpecTest):
def create_instance(self):
if 'testtask' in self.wf_spec.task_specs:
del self.wf_spec.task_specs['testtask']
return Transform(self.wf_spec, 'testtask', description='foo', transforms=[''])
def testPattern(self):
"""
        Tests that Transform tasks can modify task data in place and that
        the transformed data is passed along to downstream tasks.
"""
task1 = Transform(self.wf_spec, 'First', transforms=["my_task.set_data(foo=1)"])
self.wf_spec.start.connect(task1)
task2 = Transform(self.wf_spec, 'Second', transforms=[
"my_task.set_data(foo=my_task.data['foo']+1)",
"my_task.set_data(copy=my_task.data['foo'])"
])
task1.connect(task2)
task3 = Simple(self.wf_spec, 'Last')
task2.connect(task3)
expected = 'Start\n First\n Second\n Last\n'
workflow = run_workflow(self, self.wf_spec, expected, '')
first = self.get_first_task_from_spec_name(workflow, 'First')
last = self.get_first_task_from_spec_name(workflow, 'Last')
self.assertEqual(first.data.get('foo'), 1)
self.assertEqual(last.data.get('foo'), 2)
self.assertEqual(last.data.get('copy'), 2)

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/WorkflowSpecTest.py
import unittest
import os
import pickle
from lxml import etree
from random import randint
from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import Join, WorkflowSpec
from SpiffWorkflow.serializer.prettyxml import XmlSerializer
from ..util import track_workflow
data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
serializer = XmlSerializer()
data_file = 'data.pkl'
class WorkflowSpecTest(unittest.TestCase):
def setUp(self):
self.wf_spec = WorkflowSpec(addstart=True)
def testConstructor(self):
spec = WorkflowSpec('my spec', addstart=True)
self.assertEqual('my spec', spec.name)
def testGetTaskSpecFromName(self):
pass # FIXME
def testGetDump(self):
pass # FIXME
def testDump(self):
pass # FIXME
def doPickleSingle(self, workflow, expected_path):
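        # Run part of the workflow, pickle it, unpickle it, and check that
        # the remainder still completes along the expected path.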
taken_path = track_workflow(workflow.spec)
# Execute a random number of steps.
for i in range(randint(0, len(workflow.spec.task_specs))):
workflow.run_next()
# Store the workflow instance in a file.
with open(data_file, 'wb') as fp:
pickle.dump(workflow, fp, -1)
before = workflow.get_dump()
# Load the workflow instance from a file and delete the file.
with open(data_file, 'rb') as fp:
workflow = pickle.load(fp)
os.remove(data_file)
after = workflow.get_dump()
# Make sure that the state of the workflow did not change.
self.assertEqual(before, after)
# Re-connect signals, because the pickle dump now only contains a
# copy of taken_path.
taken_path = track_workflow(workflow.spec, taken_path)
# Run the rest of the workflow.
workflow.run_all()
after = workflow.get_dump()
self.assertTrue(workflow.is_completed(), 'Workflow not complete:' + after)
self.assertEqual(expected_path, taken_path)
def testSerialize(self):
# Read a complete workflow spec.
xml_file = os.path.join(data_dir, 'workflow1.xml')
with open(xml_file) as fp:
xml = etree.parse(fp).getroot()
path_file = os.path.splitext(xml_file)[0] + '.path'
with open(path_file) as fp:
expected_path = fp.read().strip().split('\n')
wf_spec = WorkflowSpec.deserialize(serializer, xml)
for i in range(5):
workflow = Workflow(wf_spec)
self.doPickleSingle(workflow, expected_path)
def testValidate(self):
"""
Tests that we can detect when two wait tasks are waiting on each
other.
"""
task1 = Join(self.wf_spec, 'First')
self.wf_spec.start.connect(task1)
task2 = Join(self.wf_spec, 'Second')
task1.connect(task2)
task1.connect(task2)
task2.connect(task1)
results = self.wf_spec.validate()
self.assertIn("Found loop with 'Second': Second->First then 'Second' "
"again", results)
self.assertIn("Found loop with 'First': First->Second then 'First' "
"again", results)
def testGetTaskSpecFromId(self):
pass

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/MergeTest.py
from SpiffWorkflow import Workflow
from SpiffWorkflow.specs import Merge, Simple, WorkflowSpec
from .JoinTest import JoinTest
class MergeTest(JoinTest):
def create_instance(self):
if 'testtask' in self.wf_spec.task_specs:
del self.wf_spec.task_specs['testtask']
return Merge(self.wf_spec, 'testtask', description='foo')
def test_Merge_data_merging(self):
"""Test that Merge task actually merges data"""
wf_spec = WorkflowSpec(addstart=True)
first = Simple(wf_spec, 'first')
second = Simple(wf_spec, 'second')
third = Simple(wf_spec, 'third')
bump = Simple(wf_spec, 'bump')
fourth = Simple(wf_spec, 'fourth')
merge1 = Merge(wf_spec, 'merge 1')
simple1 = Simple(wf_spec, 'simple 1')
merge2 = Merge(wf_spec, 'merge 2')
simple2 = Simple(wf_spec, 'simple 2')
unmerged = Simple(wf_spec, 'unmerged')
wf_spec.start.connect(first)
wf_spec.start.connect(second)
wf_spec.start.connect(third)
wf_spec.start.connect(bump)
bump.connect(fourth) # Test join at different depths in tree
first.connect(merge1)
second.connect(merge1)
second.connect(unmerged)
first.connect(merge2)
second.connect(merge2)
third.connect(merge2)
fourth.connect(merge2)
merge1.connect(simple1)
merge2.connect(simple2)
workflow = Workflow(wf_spec)
workflow.task_tree.set_data(everywhere=1)
for task in workflow.get_tasks():
task.set_data(**{'name': task.task_spec.name, task.task_spec.name: 1})
workflow.run_all()
self.assertTrue(workflow.is_completed())
found = {}
for task in workflow.get_tasks():
if task.task_spec is simple1:
self.assertIn('first', task.data)
self.assertIn('second', task.data)
self.assertEqual(task.data, {'everywhere': 1, 'Start': 1,
'merge 1': 1, 'name': 'Start', 'simple 1': 1,
'second': 1, 'first': 1})
found['simple1'] = task
if task.task_spec is simple2:
self.assertIn('first', task.data)
self.assertIn('second', task.data)
self.assertIn('third', task.data)
self.assertIn('fourth', task.data)
self.assertEqual(task.data, {'everywhere': 1, 'merge 2': 1,
'simple 2': 1, 'name': 'Start', 'third': 1, 'bump': 1,
'Start': 1, 'second': 1, 'first': 1, 'fourth': 1})
found['simple2'] = task
if task.task_spec is unmerged:
self.assertEqual(task.data, {'everywhere': 1, 'Start': 1,
'second': 1, 'name': 'Start', 'unmerged': 1})
found['unmerged'] = task
self.assertIn('simple1', found)
self.assertIn('simple2', found)
self.assertIn('unmerged', found)

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/SubWorkflowTest.py
import unittest
import os
from lxml import etree
from SpiffWorkflow import TaskState, Workflow
from SpiffWorkflow.specs import WorkflowSpec
from SpiffWorkflow.serializer.prettyxml import XmlSerializer
class TaskSpecTest(unittest.TestCase):
def testConstructor(self):
pass # FIXME
def testSerialize(self):
pass # FIXME
def testTest(self):
pass # FIXME
def load_workflow_spec(self, folder, f):
file = os.path.join(
os.path.dirname(__file__), '..', 'data', folder, f)
serializer = XmlSerializer()
with open(file) as fp:
xml = etree.parse(fp).getroot()
self.wf_spec = WorkflowSpec.deserialize(
serializer, xml, filename=file)
self.workflow = Workflow(self.wf_spec)
def do_next_unique_task(self, name):
        # This method asserts that there is exactly one READY task, that it is
        # the one specified, and then completes it.
self.workflow.update_waiting_tasks()
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(1, len(ready_tasks))
task = ready_tasks[0]
self.assertEqual(name, task.task_spec.name)
task.run()
def do_next_named_step(self, name, other_ready_tasks):
# This method completes a single task from the specified set of ready
# tasks
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
all_tasks = sorted([name] + other_ready_tasks)
self.assertEqual(all_tasks, sorted([t.task_spec.name for t in ready_tasks]))
task = list([t for t in ready_tasks if t.task_spec.name == name])[0]
task.run()
def test_block_to_subworkflow(self):
self.load_workflow_spec('data', 'block_to_subworkflow.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
        # Inner. The subworkflow task will complete automatically after the subworkflow completes
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_unique_task('last')
self.do_next_unique_task('End')
# Back to outer:
self.do_next_unique_task('last')
self.do_next_unique_task('End')
def test_subworkflow_to_block(self):
self.load_workflow_spec('data', 'subworkflow_to_block.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
# Inner:
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_unique_task('last')
self.do_next_unique_task('End')
# Back to outer:
self.do_next_unique_task('last')
self.do_next_unique_task('End')
def test_subworkflow_to_join(self):
self.load_workflow_spec('control-flow', 'subworkflow_to_join.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
# The subworkflow task now sets its child tasks to READY and waits
self.do_next_named_step('second', ['Start'])
# Inner:
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_unique_task('last')
self.do_next_unique_task('End')
# Back to outer:
self.do_next_unique_task('join')
self.do_next_unique_task('last')
self.do_next_unique_task('End')
def test_subworkflow_to_join_refresh_waiting(self):
self.load_workflow_spec('control-flow', 'subworkflow_to_join.xml')
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
self.do_next_named_step('second', ['Start'])
# Inner:
self.do_next_unique_task('Start')
self.do_next_unique_task('first')
# Update the state of every WAITING task.
self.workflow.update_waiting_tasks()
self.do_next_unique_task('last')
self.do_next_unique_task('End')
# Back to outer:
self.do_next_unique_task('join')
self.do_next_unique_task('last')
self.do_next_unique_task('End')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/core/specs/JoinTest.py
from SpiffWorkflow.specs.Join import Join
from .TaskSpecTest import TaskSpecTest
class JoinTest(TaskSpecTest):
def create_instance(self):
if 'testtask' in self.wf_spec.task_specs:
del self.wf_spec.task_specs['testtask']
return Join(self.wf_spec, 'testtask', description='foo')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/MultiInstanceTaskTest.py
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BaseTestCase import BaseTestCase
class MultiInstanceTaskTest(BaseTestCase):
def testMultiInstanceTask(self):
spec, subprocesses = self.load_workflow_spec('spiff_multiinstance.bpmn', 'Process_1')
self.workflow = BpmnWorkflow(spec, subprocesses)
start = self.workflow.task_tree
start.data = {'input_data': [1, 2, 3]}
self.workflow.do_engine_steps()
task = self.workflow.get_next_task(spec_name='any_task')
self.workflow.do_engine_steps()
self.save_restore()
ready_tasks = self.get_ready_user_tasks()
for task in ready_tasks:
task.data['output_item'] = task.data['input_item'] * 2
task.run()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {
'input_data': [2, 3, 4], # Prescript adds 1 to input
'output_data': [3, 5, 7], # Postscript subtracts 1 from output
})
def testMultiInstanceTaskWithInstanceScripts(self):
spec, subprocesses = self.load_workflow_spec('script_on_mi.bpmn', 'Process_1')
self.workflow = BpmnWorkflow(spec, subprocesses)
start = self.workflow.get_next_task(spec_name='Start')
start.data = {'input_data': [1, 2, 3]}
self.workflow.do_engine_steps()
task = self.workflow.get_next_task(spec_name='any_task')
self.workflow.do_engine_steps()
self.save_restore()
ready_tasks = self.get_ready_user_tasks()
for task in ready_tasks:
task.data['output_item'] = task.data['input_item'] * 2
task.run()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3], # Prescript modifies input item
'output_data': [3, 5, 7],
        })

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/ServiceTaskTest.py
import json
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BaseTestCase import BaseTestCase
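# ServiceTaskDelegate stands in for a connector proxy.  Note that assertEqual
# is a module-level global here: ServiceTaskTest.setUp() binds it to the test
# case's own assertEqual so these static methods can make assertions.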
class ServiceTaskDelegate:
@staticmethod
def call_connector(name, params, task_data):
if name == 'bamboohr/GetPayRate':
assertEqual(len(params), 3)
assertEqual(params['api_key']['value'], 'secret:BAMBOOHR_API_KEY')
assertEqual(params['employee_id']['value'], 4)
assertEqual(params['subdomain']['value'], 'ServiceTask')
elif name == 'weather/CurrentTemp':
assertEqual(len(params), 1)
assertEqual(params['zipcode']['value'], 22980)
else:
raise AssertionError('unexpected connector name')
if name == 'bamboohr/GetPayRate':
sample_response = {
"amount": "65000.00",
"currency": "USD",
"id": "4",
"payRate": "65000.00 USD",
}
elif name == 'weather/CurrentTemp':
sample_response = {
"temp": "72F",
}
return json.dumps(sample_response)
class ExampleCustomScriptEngine(PythonScriptEngine):
def call_service(self, operation_name, operation_params, task_data):
return ServiceTaskDelegate.call_connector(operation_name, operation_params,
task_data)
class ServiceTaskTest(BaseTestCase):
def setUp(self):
global assertEqual
assertEqual = self.assertEqual
spec, subprocesses = self.load_workflow_spec('service_task.bpmn',
'service_task_example1')
self.script_engine = ExampleCustomScriptEngine()
self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=self.script_engine)
def testRunThroughHappy(self):
self.workflow.do_engine_steps()
self._assert_service_tasks()
def testRunSameServiceTaskActivityMultipleTimes(self):
self.workflow.do_engine_steps()
service_task_activity = [t for t in self.workflow.get_tasks() if
t.task_spec.name == 'Activity-1inxqgx'][0]
service_task_activity.task_spec._execute(service_task_activity)
service_task_activity.task_spec._execute(service_task_activity)
service_task_activity.task_spec._execute(service_task_activity)
def testRunThroughSaveRestore(self):
self.save_restore()
# Engine isn't preserved through save/restore, so we have to reset it.
self.workflow.script_engine = self.script_engine
self.workflow.do_engine_steps()
self.save_restore()
self._assert_service_tasks()
def _assert_service_tasks(self):
# service task without result variable name specified, mock
# bamboohr/GetPayRate response
result = self.workflow.data['spiff__Activity_1inxqgx_result']
self.assertEqual(len(result), 4)
self.assertEqual(result['amount'], '65000.00')
self.assertEqual(result['currency'], 'USD')
self.assertEqual(result['id'], '4')
self.assertEqual(result['payRate'], '65000.00 USD')
# service task with result variable specified, mock weather response
result = self.workflow.data['waynesboroWeatherResult']
self.assertEqual(len(result), 1)
self.assertEqual(result['temp'], '72F')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/BaseTestCase.py
import os
from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
from SpiffWorkflow.spiff.serializer import DEFAULT_CONFIG
from SpiffWorkflow.spiff.parser import SpiffBpmnParser, VALIDATOR
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
registry = BpmnWorkflowSerializer.configure(DEFAULT_CONFIG)
class BaseTestCase(BpmnWorkflowTestCase):
""" Provides some basic tools for loading up and parsing Spiff extensions"""
serializer = BpmnWorkflowSerializer(registry)
def load_workflow_spec(self, filename, process_name, dmn_filename=None, validate=True):
bpmn = os.path.join(os.path.dirname(__file__), 'data', filename)
parser = SpiffBpmnParser(validator=VALIDATOR if validate else None)
parser.add_bpmn_files_by_glob(bpmn)
if dmn_filename is not None:
dmn = os.path.join(os.path.dirname(__file__), 'data', 'dmn', dmn_filename)
parser.add_dmn_files_by_glob(dmn)
top_level_spec = parser.get_spec(process_name)
subprocesses = parser.get_subprocess_specs(process_name)
return top_level_spec, subprocesses
def load_collaboration(self, filename, collaboration_name):
f = os.path.join(os.path.dirname(__file__), 'data', filename)
parser = SpiffBpmnParser(validator=VALIDATOR)
parser.add_bpmn_files_by_glob(f)
return parser.get_collaboration(collaboration_name)
def get_all_specs(self, filename):
f = os.path.join(os.path.dirname(__file__), 'data', filename)
parser = SpiffBpmnParser(validator=VALIDATOR)
parser.add_bpmn_files_by_glob(f)
return parser.find_all_specs()

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/ScriptUnitTestExtensionsTest.py
from .BaseTestCase import BaseTestCase
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
# Ensure we correctly parse and pass on the SpiffWorkflow properties in
# an extension.
class ScriptUnitTestExtensionsTest(BaseTestCase):
def testTask(self):
self.task_test()
def testTaskSaveRestore(self):
self.task_test(True)
def task_test(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('script_task_with_unit_tests.bpmn',
'Process_ScriptTaskWithUnitTests')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
# unitTests should be a list of dicts
expected_unit_tests_wrapper_class = list
expected_unit_test_class = dict
script_with_unit_tests = [t for t in self.workflow.get_tasks() if
t.task_spec.name == 'script_with_unit_test_id'][0]
extensions = script_with_unit_tests.task_spec.extensions
unit_test_extensions = extensions['unitTests']
self.assertEqual(len(unit_test_extensions), 2)
self.assertIsInstance(unit_test_extensions, expected_unit_tests_wrapper_class)
first_unit_test = unit_test_extensions[0]
self.assertIsInstance(first_unit_test, expected_unit_test_class)
expected_first_unit_test = {
'id': 'sets_hey_to_true_if_hey_is_false',
'inputJson': '{"hey": false}', 'expectedOutputJson': '{"hey": true}'
}
self.assertDictEqual(first_unit_test, expected_first_unit_test)

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/BusinessRuleTaskTest.py
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseTestCase import BaseTestCase
class BusinessRuleTaskTest(BaseTestCase):
def testBusinessRule(self):
spec, subprocesses = self.load_workflow_spec('business_rule_task.bpmn', 'Process_bd2e724', 'business_rules.dmn')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.save_restore()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/CorrelationTest.py
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow import TaskState
from .BaseTestCase import BaseTestCase
class CorrelationTest(BaseTestCase):
def testMessagePayload(self):
self.actual_test(False)
def testMessagePayloadSaveRestore(self):
self.actual_test(True)
    def actual_test(self, save_restore):
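        # Each ready subprocess task gets a distinct task_num; after the
        # message round-trip, each response's init_id must match the task_num
        # it was sent with, proving the messages were correlated correctly.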
specs = self.get_all_specs('correlation.bpmn')
proc_1 = specs['proc_1']
self.workflow = BpmnWorkflow(proc_1, specs)
if save_restore:
self.save_restore()
self.workflow.do_engine_steps()
# Set up some data to evaluate the payload expression against
for idx, task in enumerate(self.get_ready_user_tasks()):
task.data['task_num'] = idx
task.data['task_name'] = f'subprocess {idx}'
task.data['extra_data'] = 'unused data'
task.run()
self.workflow.do_engine_steps()
ready_tasks = self.get_ready_user_tasks()
for task in ready_tasks:
self.assertEqual(task.task_spec.name, 'prepare_response')
response = 'OK' if task.data['source_task']['num'] else 'No'
task.data.update(response=response)
task.run()
self.workflow.do_engine_steps()
# If the messages were routed properly, the task number should match the response id
for task in self.workflow.get_tasks(spec_name='subprocess_end'):
self.assertEqual(task.data['response']['init_id'], task.data['task_num'])
self.assertEqual(task.data['response']['response'], 'OK' if task.data['task_num'] else 'No')
class DualConversationTest(BaseTestCase):
def testTwoCorrelatonKeys(self):
spec, subprocesses = self.load_workflow_spec('correlation_two_conversations.bpmn', 'message_send_process')
workflow = BpmnWorkflow(spec, subprocesses)
workflow.do_engine_steps()
messages = workflow.get_events()
self.assertEqual(len(messages), 2)
self.assertEqual('Message Send One', messages[0].event_definition.name)
self.assertEqual('Message Send Two', messages[1].event_definition.name)
self.assertIn('message_correlation_key_one', messages[0].correlations)
self.assertNotIn('message_correlation_key_one', messages[1].correlations)
self.assertIn('message_correlation_key_two', messages[1].correlations)
self.assertNotIn('message_correlation_key_two', messages[0].correlations)
class ReceiveCorrelationTest(BaseTestCase):
def testReceiveCorrelations(self):
self.actual_test()
def testReceiveCorrelationsSaveRestore(self):
self.actual_test(True)
def actual_test(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('receive_correlations.bpmn', 'correlation-test')
self.workflow = BpmnWorkflow(spec, subprocesses)
if save_restore:
self.save_restore()
self.workflow.do_engine_steps()
task = self.workflow.get_next_task(state=TaskState.READY)
task.data.update(value_1='a', value_2='b')
task.run()
self.workflow.do_engine_steps()
self.assertEqual(self.workflow.correlations, {'message': {'prop_1': 'a', 'prop_2': 'b'}})
waiting_task = self.workflow.get_next_task(state=TaskState.WAITING)
event_def = waiting_task.task_spec.event_definition
payload = {'msg_value_1': 'a', 'msg_value_2': 'b'}
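        # Compute the event's correlation properties from the payload, then
        # send it into the workflow; it should match the waiting receive task
        # via the correlations asserted above.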
correlations = event_def.calculate_correlations(
waiting_task.workflow.script_engine,
event_def.correlation_properties,
payload
)
event = BpmnEvent(event_def, payload, correlations)
self.workflow.catch(event)
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/EventPayloadTest.py
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BaseTestCase import BaseTestCase
class EventPayloadTest(BaseTestCase):
def testSignalEvent(self):
spec, subprocesses = self.load_workflow_spec('signal_event_payload.bpmn', 'event_test')
self.workflow = BpmnWorkflow(spec)
self.workflow.do_engine_steps()
self.save_restore()
set_data = self.workflow.get_next_task(spec_name='set_data')
# Throw event creates payload from v1 & v2
set_data.data = {'v1': 1, 'v2': 2, 'v3': 3}
set_data.run()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {
'v1': 1,
'v2': 2,
'v3': 3,
'result': {'r1': 1, 'r2': 2}
})
def testErrorEvent(self):
spec, subprocesses = self.load_workflow_spec('error_event_payload.bpmn', 'event_test')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.workflow.do_engine_steps()
self.save_restore()
set_data = self.workflow.get_next_task(spec_name='set_data')
# Throw event creates payload from v1 & v2
set_data.data = {'error': True, 'payload': 'ERROR!'}
set_data.run()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertEqual(self.workflow.data, {'result': 'ERROR!'})
def testEscalationEvent(self):
spec, subprocesses = self.load_workflow_spec('escalation_event_payload.bpmn', 'event_test')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.workflow.do_engine_steps()
self.save_restore()
set_data = self.workflow.get_next_task(spec_name='set_data')
# Throw event creates payload from v1 & v2
set_data.data = {'escalation': True, 'payload': 'ERROR!'}
set_data.run()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
        self.assertEqual(self.workflow.data, {'result': 'ERROR!'})

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/data_object_test.py
from .BaseTestCase import BaseTestCase
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
class DataObjectTest(BaseTestCase):
def setUp(self):
self.spec, self.subprocesses = self.load_workflow_spec('data_object.bpmn', 'Process')
def test_can_get_category_from_data_object(self):
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
category = self.workflow.spec.data_objects['obj_1'].category
self.assertEqual(category, 'obj_1_category')
self.save_restore()

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/SpiffPropertiesTest.py
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseTestCase import BaseTestCase
# Ensure we correctly parse and pass on the SpiffWorkflow properties in
# an extension.
class SpiffPropertiesTest(BaseTestCase):
def testTask(self):
self.task_test()
def testTaskSaveRestore(self):
self.task_test(True)
def task_test(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('spiff_properties.bpmn', 'Process_1')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
# The ready task's spec should contain extension properties
# with name/value pairs.
task = ready_tasks[0]
self.assertDictEqual({'formJsonSchemaFilename': 'my_json_jschema.json',
'formUiSchemaFilename': 'my_ui_jschema.json'},
task.task_spec.extensions['properties'])

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/PrescriptPostscriptTest.py
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.exceptions import SpiffWorkflowException
from .BaseTestCase import BaseTestCase
class PrescriptPostscriptTest(BaseTestCase):
def testTask(self):
self.task_test()
def testCallActivity(self):
self.call_activity_test()
def testTaskSaveRestore(self):
self.task_test(True)
def testCallActivitySaveRestore(self):
self.call_activity_test(True)
def testDataObject(self):
self.test_data_object()
def testDataObjectSaveRestore(self):
self.test_data_object(True)
def test_data_object(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('prescript_postscript_data_object.bpmn', 'Process_1')
self.workflow = BpmnWorkflow(spec, subprocesses)
        # Set 'a' as a workflow data object and 'b' in the first task's data.
self.workflow.data_objects['a'] = 1
self.set_process_data({'b': 2})
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        # This executes the same script as task_test.
ready_tasks[0].run()
# a should be removed, b should be unchanged, and c and z should be present (but not x & y)
self.assertDictEqual({'b': 2, 'c': 12, 'z': 6}, ready_tasks[0].data)
def task_test(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('prescript_postscript.bpmn', 'Process_1')
self.workflow = BpmnWorkflow(spec, subprocesses)
if save_restore:
self.save_restore()
self.set_process_data({'a': 1, 'b': 2})
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
# The prescript sets x, y = a * 2, b * 2 and creates the variable z = x + y
# The postscript sets c = z * 2 and deletes x and y
# a and b should remain unchanged, and c and z should be added
ready_tasks[0].run()
self.assertDictEqual({'a': 1, 'b': 2, 'c': 12, 'z': 6}, ready_tasks[0].data)
def test_for_error(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('prescript_postscript.bpmn', 'Process_1')
self.workflow = BpmnWorkflow(spec, subprocesses)
if save_restore:
self.save_restore()
self.workflow.get_tasks(state=TaskState.READY)
# Calling do-engine steps without setting variables will raise an exception.
with self.assertRaises(SpiffWorkflowException) as se:
self.workflow.do_engine_steps()
ex = se.exception
self.assertIn("Error occurred in the Pre-Script", str(ex))
task = self.workflow.get_next_task(spec_name='Activity_1iqs4li')
self.assertEqual(task.state, TaskState.ERROR)
def call_activity_test(self, save_restore=False):
spec, subprocesses = self.load_workflow_spec('prescript_postscript_*.bpmn', 'parent')
self.workflow = BpmnWorkflow(spec, subprocesses)
if save_restore:
self.save_restore()
# Set the data and proceed. The call activity needs in_data and creates out_data
# The prescript sets in_data = old and creates out_data; the postscript copies out_data into new
        # in_data and out_data remain (they're created by the calling task, NOT the subprocess) and
# we did not explicitly remove them. We don't implicitly remove them because this would be
# the wrong behavior for regular tasks.
self.set_process_data({'old': 'hello'})
task = self.workflow.get_next_task(spec_name='Activity_0g9bcsc')
# The original data is still present and unchanged
self.assertEqual(task.data.get('old'), 'hello')
# The new data has been added
self.assertEqual(task.data.get('new'), 'HELLO')
def set_process_data(self, data):
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
ready_tasks[0].set_data(**data)
self.workflow.do_engine_steps()

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/spiff/ServiceTaskVariableTest.py
import json
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BaseTestCase import BaseTestCase
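# As in ServiceTaskTest, assertEqual is a module-level global bound to the
# test case's own assertEqual in setUp(), so the delegate below can assert on
# the parameters it receives.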
class ServiceTaskDelegate:
@staticmethod
def call_connector(name, params, task_data):
assertEqual(name, 'bamboohr/GetPayRate')
assertEqual(len(params), 3)
assertEqual(params['api_key']['value'], 'secret:BAMBOOHR_API_KEY')
assertEqual(params['employee_id']['value'], '109')
assertEqual(params['subdomain']['value'], 'statusdemo')
sample_response = {
"amount": "65000.00",
"currency": "USD",
"id": "4",
"payRate": "65000.00 USD",
}
return json.dumps(sample_response)
class ExampleCustomScriptEngine(PythonScriptEngine):
def call_service(self, operation_name, operation_params, task_data):
return ServiceTaskDelegate.call_connector(operation_name, operation_params,
task_data)
class ServiceTaskVariableTest(BaseTestCase):
def setUp(self):
global assertEqual
assertEqual = self.assertEqual
spec, subprocesses = self.load_workflow_spec('service_task_variable.bpmn', 'Process_bd2e724555')
self.script_engine = ExampleCustomScriptEngine()
self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=self.script_engine)
def testRunThroughHappy(self):
self.workflow.do_engine_steps()
self._assert_service_task()
def testRunThroughSaveRestore(self):
self.save_restore()
# Engine isn't preserved through save/restore, so we have to reset it.
self.workflow.script_engine = self.script_engine
self.workflow.do_engine_steps()
self.save_restore()
self._assert_service_task()
def _assert_service_task(self):
result = self.workflow.data['spiff__Activity_0xhr131_result']
self.assertEqual(len(result), 4)
self.assertEqual(result['amount'], '65000.00')
self.assertEqual(result['currency'], 'USD')
self.assertEqual(result['id'], '4')
self.assertEqual(result['payRate'], '65000.00 USD')

# File: sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/CallActivityTest.py
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine
from SpiffWorkflow.bpmn.exceptions import WorkflowTaskException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class CallActivityTest(BpmnWorkflowTestCase):
def setUp(self):
self.spec, self.subprocesses = self.load_workflow_spec('call_activity_*.bpmn', 'Process_8200379')
def test_data_persists_through_call_activity(self):
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
self.workflow.do_engine_steps()
self.assertDictEqual(self.workflow.data, {'pre_var': 'some string', 'my_var': 'World', 'my_other_var': 'Mike'})
def test_call_activity_has_same_script_engine(self):
class CustomScriptEngine(PythonScriptEngine):
pass
self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=CustomScriptEngine())
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertIsInstance(self.workflow.script_engine, CustomScriptEngine)
# Get the subworkflow
sub_task = self.workflow.get_next_task(spec_name='Sub_Bpmn_Task')
sub_workflow = sub_task.workflow
self.assertNotEqual(sub_workflow, self.workflow)
self.assertIsInstance(self.workflow.script_engine, CustomScriptEngine)
self.assertEqual(sub_workflow.script_engine, self.workflow.script_engine)
def test_call_activity_allows_removal_of_data(self):
        # If a call activity alters the data by removing existing keys, that
# data should be removed in the final output as well.
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertNotIn('remove_this_var', self.workflow.last_task.data.keys())
def test_call_activity_errors_include_task_trace(self):
error_spec = self.subprocesses.get('ErroringBPMN')
error_spec, subprocesses = self.load_workflow_spec('call_activity_*.bpmn', 'ErroringBPMN')
with self.assertRaises(WorkflowTaskException) as context:
self.workflow = BpmnWorkflow(error_spec, subprocesses)
self.workflow.do_engine_steps()
self.assertEqual(2, len(context.exception.task_trace))
        self.assertRegex(context.exception.task_trace[0], r'Create Data \(.*?call_activity_call_activity.bpmn\)')
        self.assertRegex(context.exception.task_trace[1], r'Get Data Call Activity \(.*?call_activity_with_error.bpmn\)')
task = self.workflow.get_next_task(spec_name='Sub_Bpmn_Task')
self.assertEqual(task.state, TaskState.ERROR)
def test_order_of_tasks_in_get_task_is_call_acitivty_task_first_then_sub_tasks(self):
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
self.workflow.do_engine_steps()
tasks = self.workflow.get_tasks()
def index_of(name):
return [i for i, x in enumerate(tasks) if x.task_spec.name == name][0]
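        # The call activity task itself must appear before any task belonging
        # to the subprocess it spawned.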
self.assertLess(index_of('Activity_Call_Activity'), index_of('Start_Called_Activity'))
self.assertLess(index_of('Activity_Call_Activity'), index_of('Sub_Bpmn_Task'))
self.assertLess(index_of('Activity_Call_Activity'), index_of('End_Called_Activity'))
882 | FeelExpressionEngineTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/FeelExpressionEngineTest.py | import datetime
from SpiffWorkflow.bpmn.script_engine.feel_engine import FeelLikeScriptEngine, FeelInterval
from SpiffWorkflow.bpmn.script_engine import TaskDataEnvironment
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class FeelExpressionTest(BpmnWorkflowTestCase):
def setUp(self):
self.expressionEngine = FeelLikeScriptEngine(environment=TaskDataEnvironment())
def testRunThroughExpressions(self):
tests = [("string length('abcd')", 4, {}),
("contains('abcXYZdef','XYZ')", True, {}),
("list contains(x,'b')", True, {'x': ['a', 'b', 'c']}),
("list contains(x,'z')", False, {'x': ['a', 'b', 'c']}),
# ("list contains(['a','b','c'],'b')",True,{}), # fails due to parse error
("all ([True,True,True])", True, {}),
("all ([True,False,True])", False, {}),
("any ([False,False,False])", False, {}),
("any ([True,False,True])", True, {}),
("PT3S", datetime.timedelta(seconds=3), {}),
("d[item>1]",[2,3,4],{'d':[1,2,3,4]}),
("d[x>=2].y",[2,3,4],{'d':[{'x':1,'y':1},
{'x': 2, 'y': 2},
{'x': 3, 'y': 3},
{'x': 4, 'y': 4},
]}),
("concatenate(a,b,c)", ['a', 'b', 'c'], {'a': ['a'],
'b': ['b'],
'c': ['c'],
}),
("append(a,'c')", ['a', 'b', 'c'], {'a': ['a', 'b']}),
("now()", FeelInterval(datetime.datetime.now() - datetime.timedelta(seconds=1),
datetime.datetime.now() + datetime.timedelta(seconds=1)),
{}),
("day of week('2020-05-07')", 4, {}),
("day of week(a)", 0, {'a': datetime.datetime(2020, 5, 3)}),
("list contains(a.keys(),'b')", True, {'a': {'b': ['a', 'x']}}),
("list contains(a.keys(),'c')", False, {'a': {'b': ['a', 'x']}}),
]
for test in tests:
            self.assertEqual(self.expressionEngine._evaluate(test[0], test[2]),
                             test[1], "test --> %s <-- with variables ==> %s <== failed" % (test[0], str(test[2])))
| 2,542 | Python | .py | 41 | 41.292683 | 113 | 0.402967 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
883 | DiffUtilTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/DiffUtilTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.util.diff import SpecDiff, WorkflowDiff, diff_workflow
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class CompareSpecTest(BpmnWorkflowTestCase):
def test_tasks_added(self):
v1_spec, v1_sp_specs = self.load_workflow_spec('diff/v1.bpmn', 'Process')
v2_spec, v2_sp_specs = self.load_workflow_spec('diff/v2.bpmn', 'Process')
result = SpecDiff(self.serializer.registry, v1_spec, v2_spec)
self.assertEqual(len(result.added), 3)
self.assertIn(v2_spec.task_specs.get('Gateway_1618q26'), result.added)
self.assertIn(v2_spec.task_specs.get('Activity_1ds7clb'), result.added)
self.assertIn(v2_spec.task_specs.get('Event_0tatpgq'), result.added)
def test_tasks_removed(self):
v1_spec, v1_sp_specs = self.load_workflow_spec('diff/v1.bpmn', 'Process')
v2_spec, v2_sp_specs = self.load_workflow_spec('diff/v2.bpmn', 'Process')
result = SpecDiff(self.serializer.registry, v2_spec, v1_spec)
self.assertEqual(len(result.removed), 3)
self.assertIn(v2_spec.task_specs.get('Gateway_1618q26'), result.removed)
self.assertIn(v2_spec.task_specs.get('Activity_1ds7clb'), result.removed)
self.assertIn(v2_spec.task_specs.get('Event_0tatpgq'), result.removed)
def test_tasks_changed(self):
v2_spec, v2_sp_specs = self.load_workflow_spec('diff/v2.bpmn', 'Process')
v3_spec, v3_sp_specs = self.load_workflow_spec('diff/v3.bpmn', 'Process')
result = SpecDiff(self.serializer.registry, v2_spec, v3_spec)
        # The default output was changed and the conditional output was converted to a subprocess
self.assertListEqual(
result.changed.get(v2_spec.task_specs.get('Gateway_1618q26')),
['outputs', 'cond_task_specs', 'default_task_spec']
)
# The generic task was changed to a subprocess
self.assertListEqual(
result.changed.get(v2_spec.task_specs.get('Activity_1ds7clb')),
['typename']
)
def test_alignment(self):
v2_spec, v2_sp_specs = self.load_workflow_spec('diff/v2.bpmn', 'Process')
v3_spec, v3_sp_specs = self.load_workflow_spec('diff/v3.bpmn', 'Process')
result = SpecDiff(self.serializer.registry, v2_spec, v3_spec)
old_end_event = v2_spec.task_specs.get('Event_0rilo47')
new_end_event = v3_spec.task_specs.get('Event_18osyv3')
self.assertEqual(result.alignment[old_end_event], new_end_event)
for old, new in result.alignment.items():
if old is not old_end_event:
self.assertEqual(old.name, new.name)
def test_multiple(self):
v4_spec, v4_sp_specs = self.load_workflow_spec('diff/v4.bpmn', 'Process')
v5_spec, v5_sp_specs = self.load_workflow_spec('diff/v5.bpmn', 'Process')
result = SpecDiff(self.serializer.registry, v4_spec, v5_spec)
self.assertEqual(len(result.removed), 4)
self.assertEqual(len(result.changed), 4)
self.assertIn(v4_spec.task_specs.get('Gateway_0z1qhgl'), result.removed)
self.assertIn(v4_spec.task_specs.get('Gateway_1acqedb'), result.removed)
self.assertIn(v4_spec.task_specs.get('Activity_1lmz1t0'), result.removed)
self.assertIn(v4_spec.task_specs.get('Activity_11gnihu'), result.removed)
self.assertListEqual(
result.changed.get(v4_spec.task_specs.get('Gateway_1618q26')),
['outputs', 'cond_task_specs', 'default_task_spec']
)
self.assertListEqual(
result.changed.get(v4_spec.task_specs.get('Gateway_0p4fq77')),
['inputs']
)
self.assertListEqual(
result.changed.get(v4_spec.task_specs.get('Activity_1ds7clb')),
['typename']
)
class CompareWorkflowTest(BpmnWorkflowTestCase):
def test_changed(self):
v3_spec, v3_sp_specs = self.load_workflow_spec('diff/v3.bpmn', 'Process')
v4_spec, v4_sp_specs = self.load_workflow_spec('diff/v4.bpmn', 'Process')
spec_diff = SpecDiff(self.serializer.registry, v3_spec, v4_spec)
sp_spec_diff = SpecDiff(
self.serializer.registry,
v3_sp_specs['Activity_1ds7clb'],
v4_sp_specs['Activity_1ds7clb']
)
workflow = BpmnWorkflow(v3_spec, v3_sp_specs)
task = workflow.get_next_task(state=TaskState.READY, manual=False)
while task is not None:
task.run()
task = workflow.get_next_task(state=TaskState.READY, manual=False)
wf_diff = WorkflowDiff(workflow, spec_diff)
self.assertEqual(len(wf_diff.changed), 2)
self.assertIn(workflow.get_next_task(spec_name='Activity_0b53566'), wf_diff.changed)
self.assertIn(workflow.get_next_task(spec_name='Gateway_1618q26'), wf_diff.changed)
sp = workflow.get_subprocess(workflow.get_next_task(spec_name='Activity_1ds7clb'))
sp_diff = WorkflowDiff(sp, sp_spec_diff)
self.assertEqual(len(sp_diff.changed), 2)
self.assertIn(workflow.get_next_task(spec_name='Activity_0uijumg'), sp_diff.changed)
self.assertIn(workflow.get_next_task(spec_name='Event_1wlwaz1'), sp_diff.changed)
def test_removed(self):
v4_spec, v4_sp_specs = self.load_workflow_spec('diff/v4.bpmn', 'Process')
v5_spec, v5_sp_specs = self.load_workflow_spec('diff/v5.bpmn', 'Process')
spec_diff = SpecDiff(self.serializer.registry, v4_spec, v5_spec)
sp_spec_diff = SpecDiff(
self.serializer.registry,
v4_sp_specs['Activity_1ds7clb'],
v5_sp_specs['Activity_1ds7clb']
)
workflow = BpmnWorkflow(v4_spec, v4_sp_specs)
task = workflow.get_next_task(state=TaskState.READY)
while task is not None:
if task.task_spec.name != 'Activity_16ggmbf':
task.run()
else:
break
task = workflow.get_next_task(state=TaskState.READY)
wf_diff = WorkflowDiff(workflow, spec_diff)
self.assertEqual(len(wf_diff.removed), 5)
self.assertIn(workflow.get_next_task(spec_name='Gateway_0z1qhgl'), wf_diff.removed)
self.assertIn(workflow.get_next_task(spec_name='Activity_1lmz1t0'), wf_diff.removed)
self.assertIn(workflow.get_next_task(spec_name='Activity_11gnihu'), wf_diff.removed)
self.assertIn(workflow.get_next_task(spec_name='Gateway_1acqedb'), wf_diff.removed)
def test_subprocess_changed(self):
v3_spec, v3_sp_specs = self.load_workflow_spec('diff/v3.bpmn', 'Process')
v4_spec, v4_sp_specs = self.load_workflow_spec('diff/v4.bpmn', 'Process')
workflow = BpmnWorkflow(v3_spec, v3_sp_specs)
task = workflow.get_next_task(state=TaskState.READY, manual=False)
while task is not None:
task.run()
task = workflow.get_next_task(state=TaskState.READY, manual=False)
result, sp_result = diff_workflow(self.serializer.registry, workflow, v4_spec, v4_sp_specs)
sp_task = workflow.get_next_task(spec_name='Activity_1ds7clb')
self.assertIn(sp_task.id, sp_result)
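

# Hedged illustration (not part of the original test file): a SpecDiff exposes
# the task specs added, removed and changed between two spec versions, plus an
# alignment map from old specs to their new counterparts. `_example_spec_diff`
# is a hypothetical helper; it reuses only the attributes asserted on above.
def _example_spec_diff(registry, old_spec, new_spec):
    diff = SpecDiff(registry, old_spec, new_spec)
    return {
        'added': diff.added,
        'removed': diff.removed,
        'changed': diff.changed,
        'alignment': diff.alignment,
    }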
| 7,208 | Python | .py | 125 | 48.256 | 99 | 0.663179 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
884 | ResetSubProcessTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py | from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ResetSubProcessTest(BpmnWorkflowTestCase):
"""Assure we can reset a token to a previous task when we have
a sub-workflow."""
def setUp(self):
spec, subprocesses = self.load_workflow_spec('resetworkflowA-*.bpmn', 'TopLevel')
self.workflow = BpmnWorkflow(spec, subprocesses)
def reload_save_restore(self):
spec, subprocesses = self.load_workflow_spec('resetworkflowB-*.bpmn', 'TopLevel')
self.workflow = BpmnWorkflow(spec, subprocesses)
# Save and restore the workflow, without including the spec.
# When loading the spec, use a slightly different spec.
self.workflow.do_engine_steps()
state = self.serializer.serialize_json(self.workflow)
self.workflow = self.serializer.deserialize_json(state)
self.workflow.spec = spec
self.workflow.subprocess_specs = subprocesses
def testSaveRestore(self):
self.actualTest(True)
def testResetToOuterWorkflowWhileInSubWorkflow(self):
self.workflow.do_engine_steps()
top_level_task = self.get_ready_user_tasks()[0]
top_level_task.run()
self.workflow.do_engine_steps()
task = self.get_ready_user_tasks()[0]
self.save_restore()
top_level_task = self.workflow.get_next_task(spec_name='Task1')
self.workflow.reset_from_task_id(top_level_task.id)
task = self.get_ready_user_tasks()[0]
self.assertEqual(len(self.get_ready_user_tasks()), 1, "There should only be one task in a ready state.")
self.assertEqual(task.task_spec.name, 'Task1')
def actualTest(self, save_restore=False):
self.workflow.do_engine_steps()
self.assertEqual(1, len(self.get_ready_user_tasks()))
task = self.get_ready_user_tasks()[0]
task.run()
self.workflow.do_engine_steps()
task = self.get_ready_user_tasks()[0]
self.assertEqual(task.task_spec.name,'SubTask2')
task.run()
self.workflow.do_engine_steps()
task = self.workflow.get_next_task(spec_name='Task1')
task.reset_branch(self.workflow.last_task.data)
self.workflow.do_engine_steps()
self.reload_save_restore()
task = self.get_ready_user_tasks()[0]
self.assertEqual(task.task_spec.name,'Task1')
task.run()
self.workflow.do_engine_steps()
task = self.get_ready_user_tasks()[0]
self.assertEqual(task.task_spec.name,'Subtask2')
task.run()
self.workflow.do_engine_steps()
task = self.get_ready_user_tasks()[0]
self.assertEqual(task.task_spec.name,'Subtask2A')
task.run()
self.workflow.do_engine_steps()
task = self.get_ready_user_tasks()[0]
self.assertEqual(task.task_spec.name,'Task2')
task.run()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
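

# Hedged illustration (not part of the original test file): the reset pattern
# exercised above boils down to locating a task by spec name and rewinding the
# workflow to it. `_example_reset_to_task` is a hypothetical helper.
def _example_reset_to_task(workflow, spec_name):
    task = workflow.get_next_task(spec_name=spec_name)
    workflow.reset_from_task_id(task.id)
    return task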
| 3,024 | Python | .py | 64 | 38.953125 | 112 | 0.667684 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
885 | ServiceTaskTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ServiceTaskTest.py | # -*- coding: utf-8 -*-
import os
import sys
import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
dirname = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(dirname, '..', '..', '..'))
class ServiceTaskTest(BpmnWorkflowTestCase):
def setUp(self):
spec, subprocesses = self.load_workflow_spec('service_task.bpmn',
'service_task_example1')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testRunThroughHappy(self):
self.workflow.do_engine_steps()
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ServiceTaskTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 775 | Python | .py | 19 | 36.473684 | 78 | 0.728972 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
886 | SwimLaneTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/SwimLaneTest.py | import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class SwimLaneTest(BpmnWorkflowTestCase):
"""
Test sample bpmn document to make sure the nav list
contains the correct swimlane in the 'lane' component
and make sure that our waiting tasks accept a lane parameter
and that it picks up the correct tasks.
"""
def setUp(self):
spec, subprocesses = self.load_workflow_spec('lanes.bpmn','lanes')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testBpmnParserKnowsLanesExist(self):
parser = self.get_parser('lanes.bpmn')
self.assertTrue(parser.get_process_parser('lanes').has_lanes())
parser = self.get_parser('random_fact.bpmn')
self.assertFalse(parser.get_process_parser('random_fact').has_lanes())
def testRunThroughHappy(self):
self.workflow.do_engine_steps()
atasks = self.get_ready_user_tasks(lane="A")
btasks = self.get_ready_user_tasks(lane="B")
self.assertEqual(1, len(atasks))
self.assertEqual(0, len(btasks))
task = atasks[0]
self.assertEqual('Activity_A1', task.task_spec.name)
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
atasks = self.get_ready_user_tasks(lane="A")
btasks = self.get_ready_user_tasks(lane="B")
self.assertEqual(0, len(atasks))
self.assertEqual(1, len(btasks))
# Complete the gateway and the two tasks in B Lane
btasks[0].data = {'NeedClarification': False}
self.workflow.run_task_from_id(btasks[0].id)
self.workflow.do_engine_steps()
btasks = self.get_ready_user_tasks(lane="B")
self.workflow.run_task_from_id(btasks[0].id)
self.workflow.do_engine_steps()
# Assert we are in lane C
tasks = self.get_ready_user_tasks()
self.assertEqual(1, len(tasks))
self.assertEqual(tasks[0].task_spec.lane, "C")
        # Step into the sub-process, assure that it is also in lane C
self.workflow.run_task_from_id(tasks[0].id)
self.workflow.do_engine_steps()
tasks = self.get_ready_user_tasks()
self.assertEqual("SubProcessTask", tasks[0].task_spec.bpmn_name)
self.assertEqual(tasks[0].task_spec.lane, "C")
| 2,392 | Python | .py | 50 | 40.14 | 78 | 0.676105 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
887 | ResetTokenOnBoundaryEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ResetTokenOnBoundaryEventTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class ResetTokenOnBoundaryEventTest(BpmnWorkflowTestCase):
"""Assure that when we reset a token to a previous task, and that
task has a boundary event, that the boundary event is reset to the
correct state."""
def setUp(self):
spec, subprocesses = self.load_workflow_spec('reset_with_boundary_event.bpmn', 'token')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testResetToOuterWorkflow(self):
self.reset_to_outer_workflow(save_restore=False)
def testResetToSubprocess(self):
self.reset_to_subprocess(save_restore=False)
def testSaveRestore(self):
self.reset_to_outer_workflow(save_restore=True)
def reset_to_outer_workflow(self, save_restore=False):
        # Advance inside the subworkflow
self.advance_to_task('Last')
sub = self.workflow.get_next_task(spec_name='subprocess')
timer_event = self.workflow.get_next_task(spec_name='Event_My_Timer')
self.assertEqual(TaskState.CANCELLED, timer_event.state)
if save_restore:
self.save_restore()
# Here we reset back to the first task
first = self.workflow.get_next_task(spec_name='First')
self.workflow.reset_from_task_id(first.id)
if save_restore:
self.save_restore()
        # At which point, the timer event should return to a waiting state and the subprocess should have been removed
task = self.workflow.get_next_task(spec_name='First')
self.assertEqual(task.state, TaskState.READY)
timer_event = self.workflow.get_next_task(spec_name='Event_My_Timer')
self.assertEqual(timer_event.state, TaskState.WAITING)
self.assertNotIn(sub.id, self.workflow.subprocesses)
# Ensure the workflow can be completed without being stuck on stranded tasks
self.complete_workflow()
self.assertTrue(self.workflow.completed)
def reset_to_subprocess(self, save_restore=False):
# Advance past the subworkflow
self.advance_to_task('Final')
if save_restore:
self.save_restore()
# Reset to a task inside the subworkflow
task = self.workflow.get_next_task(spec_name='Last')
self.workflow.reset_from_task_id(task.id)
if save_restore:
self.save_restore()
        # The task we returned to should be ready, the subprocess should be started, the final task should be future
sub = self.workflow.get_next_task(spec_name='subprocess')
self.assertEqual(sub.state, TaskState.STARTED)
self.assertEqual(task.state, TaskState.READY)
final = self.workflow.get_next_task(spec_name='Final')
self.assertEqual(final.state, TaskState.FUTURE)
# Ensure the workflow can be completed without being stuck on stranded tasks
self.complete_workflow()
self.assertTrue(self.workflow.completed)
def advance_to_task(self, name):
self.workflow.do_engine_steps()
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
while ready_tasks[0].task_spec.name != name:
ready_tasks[0].run()
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
def complete_workflow(self):
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
while len(ready_tasks) > 0:
ready_tasks[0].run()
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
| 3,778 | Python | .py | 72 | 43.416667 | 116 | 0.688756 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
888 | InclusiveGatewayTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/InclusiveGatewayTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.exceptions import WorkflowTaskException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class InclusiveGatewayTest(BpmnWorkflowTestCase):
def setUp(self):
spec, subprocess = self.load_workflow_spec('inclusive_gateway.bpmn', 'main')
self.workflow = BpmnWorkflow(spec)
self.workflow.do_engine_steps()
def testDefaultConditionOnly(self):
self.set_data({'v': -1, 'u': -1, 'w': -1})
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {'v': 0, 'u': -1, 'w': -1})
def testDefaultConditionOnlySaveRestore(self):
self.set_data({'v': -1, 'u': -1, 'w': -1})
self.save_restore()
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {'v': 0, 'u': -1, 'w': -1})
def testNoPathFromSecondGateway(self):
self.set_data({'v': 0, 'u': -1, 'w': -1})
self.assertRaises(WorkflowTaskException, self.workflow.do_engine_steps)
task = self.workflow.get_next_task(spec_name='second')
self.assertEqual(task.state, TaskState.ERROR)
def testParallelCondition(self):
self.set_data({'v': 0, 'u': 1, 'w': 1})
gw = self.workflow.get_next_task(state=TaskState.READY)
gw.run()
self.assertIsNone(self.workflow.get_next_task(spec_name='increment_v'))
        self.assertEqual(self.workflow.get_next_task(spec_name='u_plus_v').state, TaskState.READY)
        self.assertEqual(self.workflow.get_next_task(spec_name='w_plus_v').state, TaskState.READY)
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {'v': 0, 'u': 1, 'w': 1})
def set_data(self, value):
task = self.get_ready_user_tasks()[0]
task.data = value
task.run()
| 2,017 | Python | .py | 39 | 44.153846 | 97 | 0.670051 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
889 | CustomScriptTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/CustomScriptTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from SpiffWorkflow.bpmn.exceptions import WorkflowTaskException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'McDonald, danfunk'
def my_custom_function(txt):
return str(txt).upper()
class CustomBpmnScriptEngine(PythonScriptEngine):
"""This is a custom script processor that can be easily injected into Spiff Workflow.
It will execute python code read in from the bpmn. It will also make any scripts in the
scripts directory available for execution. """
def __init__(self):
environment = TaskDataEnvironment({'custom_function': my_custom_function})
super().__init__(environment=environment)
class CustomInlineScriptTest(BpmnWorkflowTestCase):
def setUp(self):
spec, subprocesses = self.load_workflow_spec('custom_function_test*', 'top_workflow')
script_engine = CustomBpmnScriptEngine()
self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=script_engine)
def testRunThroughHappy(self):
self.actual_test(save_restore=False)
def testRunThroughSaveRestore(self):
        self.actual_test(save_restore=True)
def actual_test(self, save_restore):
if save_restore:
self.save_restore()
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
data = self.workflow.last_task.data
self.assertEqual(data['c1'], 'HELLO')
self.assertEqual(data['c2'], 'GOODBYE')
self.assertEqual(data['c3'], 'ARRIVEDERCI')
def test_overwrite_function_with_local_variable(self):
ready_task = self.workflow.get_tasks(state=TaskState.READY)[0]
ready_task.data = {'custom_function': "bill"}
with self.assertRaises(WorkflowTaskException) as e:
self.workflow.do_engine_steps()
self.assertTrue('custom_function' in str(e.exception))
task = self.workflow.get_next_task(spec_name='Activity_1y303ko')
self.assertEqual(task.state, TaskState.ERROR)
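

# Hedged illustration (not part of the original test file): any callable placed
# in the TaskDataEnvironment becomes available to scripts run by the engine,
# which is how `custom_function` reaches the BPMN scripts above.
# `_example_engine_with_helpers` is a hypothetical helper.
def _example_engine_with_helpers():
    environment = TaskDataEnvironment({'shout': my_custom_function})
    return PythonScriptEngine(environment=environment)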
| 2,158 | Python | .py | 42 | 44.47619 | 93 | 0.72399 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
890 | CollaborationTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/CollaborationTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.mixins import CallActivityMixin
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class CollaborationTest(BpmnWorkflowTestCase):
def testParserProvidesInfoOnMessagesAndCorrelations(self):
parser = self.get_parser('collaboration.bpmn')
self.assertEqual(list(parser.messages.keys()), ['love_letter', 'love_letter_response'])
self.assertEqual(parser.correlations,
{'lover_name': {'name': "Lover's Name",
'retrieval_expressions': [
{'expression': 'lover_name',
'messageRef': 'love_letter'},
{'expression': 'from_name',
'messageRef': 'love_letter_response'}]}}
)
def testCollaboration(self):
spec, subprocesses = self.load_collaboration('collaboration.bpmn', 'my_collaboration')
# Only executable processes should be started
self.assertIn('process_buddy', subprocesses)
self.assertNotIn('random_person_process', subprocesses)
self.workflow = BpmnWorkflow(spec, subprocesses)
start = self.workflow.task_tree
# Set up some data to be evaluated so that the workflow can proceed
start.data['lover_name'] = 'Peggy'
self.workflow.do_engine_steps()
# Call activities should be created for executable processes and be reachable
buddy = self.workflow.get_next_task(spec_name='process_buddy')
self.assertIsInstance(buddy.task_spec, CallActivityMixin)
self.assertEqual(buddy.task_spec.spec, 'process_buddy')
self.assertEqual(buddy.state, TaskState.STARTED)
def testBpmnMessage(self):
spec, subprocesses = self.load_workflow_spec('collaboration.bpmn', 'process_buddy')
self.workflow = BpmnWorkflow(spec, subprocesses)
start = self.workflow.get_tasks(end_at_spec='Start')[0]
# Set up some data to be evaluated so that the workflow can proceed
start.data['lover_name'] = 'Peggy'
self.workflow.do_engine_steps()
# An external message should be created
messages = self.workflow.get_events()
self.assertEqual(len(messages), 1)
self.assertEqual(len(self.workflow.bpmn_events), 0)
receive = self.workflow.get_next_task(spec_name='EventReceiveLetter')
        # Waiting events should contain details about what we are now waiting on.
events = self.workflow.waiting_events()
self.assertEqual(1, len(events))
self.assertEqual("MessageEventDefinition", events[0].event_type)
self.assertEqual("Love Letter Response", events[0].name)
self.assertEqual(['lover'], events[0].value[0].correlation_keys)
self.assertEqual('from_name', events[0].value[0].retrieval_expression)
self.assertEqual('lover_name', events[0].value[0].name)
message = BpmnEvent(
receive.task_spec.event_definition,
{'from_name': 'Peggy', 'other_nonsense': 1001}
)
self.workflow.send_event(message)
self.workflow.do_engine_steps()
self.assertEqual(receive.state, TaskState.COMPLETED)
self.assertEqual(self.workflow.last_task.data, {'from_name': 'Peggy', 'lover_name': 'Peggy', 'other_nonsense': 1001})
self.assertEqual(self.workflow.correlations, {'lover':{'lover_name':'Peggy'}})
self.assertEqual(self.workflow.completed, True)
def testCorrelation(self):
specs = self.get_all_specs('correlation.bpmn')
proc_1 = specs['proc_1']
self.workflow = BpmnWorkflow(proc_1, specs)
self.workflow.do_engine_steps()
for idx, task in enumerate(self.get_ready_user_tasks()):
task.data['task_num'] = idx
task.run()
self.workflow.do_engine_steps()
ready_tasks = self.get_ready_user_tasks()
waiting = self.workflow.get_tasks(spec_name='get_response')
# Two processes should have been started and two corresponding catch events should be waiting
self.assertEqual(len(ready_tasks), 2)
self.assertEqual(len(waiting), 2)
for task in waiting:
self.assertEqual(task.state, TaskState.WAITING)
# Now copy the task_num that was sent into a new variable
for task in ready_tasks:
task.data.update(init_id=task.data['task_num'])
task.run()
self.workflow.do_engine_steps()
# If the messages were routed properly, the id should match
        for task in self.workflow.get_tasks(spec_name='subprocess_end'):
self.assertEqual(task.data['task_num'], task.data['init_id'])
def testTwoCorrelationKeys(self):
specs = self.get_all_specs('correlation_two_conversations.bpmn')
proc_1 = specs['proc_1']
self.workflow = BpmnWorkflow(proc_1, specs)
self.workflow.do_engine_steps()
for idx, task in enumerate(self.get_ready_user_tasks()):
task.data['task_num'] = idx
task.run()
self.workflow.do_engine_steps()
# Two processes should have been started and two corresponding catch events should be waiting
ready_tasks = self.get_ready_user_tasks()
waiting = self.workflow.get_tasks(spec_name='get_response_one')
self.assertEqual(len(ready_tasks), 2)
self.assertEqual(len(waiting), 2)
for task in waiting:
self.assertEqual(task.state, TaskState.WAITING)
# Now copy the task_num that was sent into a new variable
for task in ready_tasks:
task.data.update(init_id=task.data['task_num'])
task.run()
self.workflow.do_engine_steps()
# Complete dummy tasks
for task in self.get_ready_user_tasks():
task.run()
self.workflow.do_engine_steps()
# Repeat for the other process, using a different mapped name
ready_tasks = self.get_ready_user_tasks()
waiting = self.workflow.get_tasks(spec_name='get_response_two')
self.assertEqual(len(ready_tasks), 2)
self.assertEqual(len(waiting), 2)
for task in ready_tasks:
task.data.update(subprocess=task.data['task_num'])
task.run()
self.workflow.do_engine_steps()
# If the messages were routed properly, the id should match
for task in self.workflow.get_tasks(spec_name='subprocess_end'):
self.assertEqual(task.data['task_num'], task.data['init_id'])
self.assertEqual(task.data['task_num'], task.data['subprocess'])
def testSerialization(self):
spec, subprocesses = self.load_collaboration('collaboration.bpmn', 'my_collaboration')
self.workflow = BpmnWorkflow(spec, subprocesses)
start = self.workflow.get_tasks(end_at_spec='Start')[0]
start.data['lover_name'] = 'Peggy'
self.workflow.do_engine_steps()
self.save_restore()
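

# Hedged illustration (not part of the original test file): delivering an
# external message to a waiting catch event, as testBpmnMessage does above.
# `_example_send_message` is a hypothetical helper reusing only APIs shown here.
def _example_send_message(workflow, catch_task, payload):
    event = BpmnEvent(catch_task.task_spec.event_definition, payload)
    workflow.send_event(event)
    workflow.do_engine_steps()
    return workflow.correlations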
| 7,143 | Python | .py | 130 | 44.053846 | 125 | 0.646453 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
891 | ParallelMultiInstanceTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ParallelMultiInstanceTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.exceptions import WorkflowDataException
from SpiffWorkflow.bpmn.specs.data_spec import TaskDataReference
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
class BaseTestCase(BpmnWorkflowTestCase):
def set_io_and_run_workflow(self, data, data_input=None, data_output=None, save_restore=False):
start = self.workflow.get_next_task(end_at_spec='Start')
start.data = data
any_task = self.workflow.get_next_task(spec_name='any_task')
any_task.task_spec.data_input = TaskDataReference(data_input) if data_input is not None else None
any_task.task_spec.data_output = TaskDataReference(data_output) if data_output is not None else None
self.workflow.do_engine_steps()
task_info = any_task.task_spec.task_info(any_task)
self.assertEqual(len(task_info['completed']), 0)
self.assertEqual(len(task_info['running']), 3)
self.assertEqual(len(task_info['future']), 0)
self.assertEqual(len(task_info['instance_map']), 3)
instance_map = task_info['instance_map']
ready_tasks = self.get_ready_user_tasks()
self.assertEqual(len(ready_tasks), 3)
while len(ready_tasks) > 0:
task = ready_tasks[0]
task_info = task.task_spec.task_info(task)
self.assertEqual(task.task_spec.name, 'any_task [child]')
self.assertIn('input_item', task.data)
self.assertEqual(instance_map[task_info['instance']], str(task.id))
task.data['output_item'] = task.data['input_item'] * 2
task.run()
if save_restore:
self.save_restore()
ready_tasks = self.get_ready_user_tasks()
self.workflow.refresh_waiting_tasks()
self.workflow.do_engine_steps()
any_task = self.workflow.get_next_task(spec_name='any_task')
task_info = any_task.task_spec.task_info(any_task)
self.assertEqual(len(task_info['completed']), 3)
self.assertEqual(len(task_info['running']), 0)
self.assertEqual(len(task_info['future']), 0)
self.assertTrue(self.workflow.completed)
def run_workflow_with_condition(self, data):
start = self.workflow.get_next_task(end_at_spec='Start')
start.data = data
task = self.workflow.get_next_task(spec_name='any_task')
task.task_spec.condition = "input_item == 2"
self.workflow.do_engine_steps()
ready_tasks = self.get_ready_user_tasks()
self.assertEqual(len(ready_tasks), 3)
task = [t for t in ready_tasks if t.data['input_item'] == 2][0]
task.data['output_item'] = task.data['input_item'] * 2
task.run()
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
self.assertTrue(self.workflow.completed)
self.assertEqual(len([ t for t in ready_tasks if t.state == TaskState.CANCELLED]), 2)
class ParallelMultiInstanceExistingOutputTest(BaseTestCase):
def setUp(self):
self.spec, subprocess = self.load_workflow_spec('parallel_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testListWithDictOutput(self):
data = {
'input_data': [1, 2, 3],
'output_data': {},
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': {0: 2, 1: 4, 2: 6},
})
def testDictWithListOutput(self):
data = {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': [],
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': [2, 4, 6],
})
def testNonEmptyOutput(self):
with self.assertRaises(WorkflowDataException) as exc:
data = {
'input_data': [1, 2, 3],
'output_data': [1, 2, 3],
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertEqual(exc.exception.message,
"If the input is not being updated in place, the output must be empty or it must be a map (dict)")
def testInvalidOutputType(self):
with self.assertRaises(WorkflowDataException) as exc:
data = {
'input_data': set([1, 2, 3]),
'output_data': set(),
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertEqual(exc.exception.message, "Only a mutable map (dict) or sequence (list) can be used for output")
class ParallelMultiInstanceNewOutputTest(BaseTestCase):
def setUp(self):
self.spec, subprocess = self.load_workflow_spec('parallel_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testList(self):
data = {'input_data': [1, 2, 3]}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': [2, 4, 6]
})
def testListSaveRestore(self):
data = {'input_data': [1, 2, 3]}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data', save_restore=True)
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': [2, 4, 6]
})
def testDict(self):
data = {'input_data': {'a': 1, 'b': 2, 'c': 3} }
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': {'a': 2, 'b': 4, 'c': 6}
})
def testDictSaveRestore(self):
data = {'input_data': {'a': 1, 'b': 2, 'c': 3} }
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data', save_restore=True)
self.assertDictEqual(self.workflow.data, {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': {'a': 2, 'b': 4, 'c': 6}
})
def testSet(self):
data = {'input_data': set([1, 2, 3])}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': set([1, 2, 3]),
'output_data': [2, 4, 6]
})
def testEmptyCollection(self):
start = self.workflow.get_next_task(end_at_spec='Start')
start.data = {'input_data': []}
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {'input_data': [], 'output_data': []})
def testCondition(self):
self.run_workflow_with_condition({'input_data': [1, 2, 3]})
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': [4]
})
class ParallelMultiInstanceUpdateInputTest(BaseTestCase):
def setUp(self):
self.spec, subprocess = self.load_workflow_spec('parallel_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testList(self):
data = { 'input_data': [1, 2, 3]}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='input_data')
self.assertDictEqual(self.workflow.data, {'input_data': [2, 4, 6]})
def testDict(self):
data = { 'input_data': {'a': 1, 'b': 2, 'c': 3}}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='input_data')
self.assertDictEqual(self.workflow.data, {'input_data': {'a': 2, 'b': 4, 'c': 6}})
class ParallelMultiInstanceWithCardinality(BaseTestCase):
def setUp(self) -> None:
self.spec, subprocess = self.load_workflow_spec('parallel_multiinstance_cardinality.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testCardinality(self):
self.set_io_and_run_workflow({}, data_output='output_data')
self.assertDictEqual(self.workflow.data, {'output_data': [0, 2, 4]})
def testCardinalitySaveRestore(self):
self.set_io_and_run_workflow({}, data_output='output_data', save_restore=True)
self.assertDictEqual(self.workflow.data, {'output_data': [0, 2, 4]})
def testCondition(self):
self.run_workflow_with_condition({})
self.assertDictEqual(self.workflow.data, {
'output_data': [4]
})
class ParallelMultiInstanceTaskTest(BpmnWorkflowTestCase):
def check_reference(self, reference, name):
self.assertIsInstance(reference, TaskDataReference)
self.assertEqual(reference.bpmn_id, name)
def testParseInputOutput(self):
spec, subprocess = self.load_workflow_spec('parallel_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(spec)
task_spec = self.workflow.get_next_task(spec_name='any_task').task_spec
self.check_reference(task_spec.data_input, 'input_data')
self.check_reference(task_spec.data_output, 'output_data')
self.check_reference(task_spec.input_item, 'input_item')
self.check_reference(task_spec.output_item, 'output_item')
self.assertIsNone(task_spec.cardinality)
def testParseCardinality(self):
spec, subprocess = self.load_workflow_spec('parallel_multiinstance_cardinality.bpmn', 'main')
self.workflow = BpmnWorkflow(spec)
task_spec = self.workflow.get_next_task(spec_name='any_task').task_spec
self.assertIsNone(task_spec.data_input)
self.assertEqual(task_spec.cardinality, '3')
def testInvalidBpmn(self):
with self.assertRaises(ValidationException) as exc:
spec, subprocess = self.load_workflow_spec('parallel_multiinstance_invalid.bpmn', 'main')
self.assertEqual(exc.exception.message,
'A multiinstance task must specify exactly one of cardinality or loop input data reference.')
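

# Hedged illustration (not part of the original test file): the helpers above
# rewire a multi-instance task by pointing its input and output at names in
# the task data. `_example_configure_multiinstance` is a hypothetical helper.
def _example_configure_multiinstance(task_spec, input_name, output_name):
    task_spec.data_input = TaskDataReference(input_name)
    task_spec.data_output = TaskDataReference(output_name)
    return task_spec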
| 10,535 | Python | .py | 199 | 43.527638 | 122 | 0.633233 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
892 | ParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ParserTest.py | import unittest
import os
from SpiffWorkflow.bpmn.parser import BpmnParser
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
class ParserTest(unittest.TestCase):
def setUp(self):
self.parser = BpmnParser()
def testIOSpecification(self):
bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'io_spec.bpmn')
self.parser.add_bpmn_file(bpmn_file)
spec = self.parser.get_spec('subprocess')
self.assertEqual(len(spec.io_specification.data_inputs), 2)
self.assertEqual(len(spec.io_specification.data_outputs), 2)
def testDataReferences(self):
bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'data_object.bpmn')
self.parser.add_bpmn_file(bpmn_file)
spec = self.parser.get_spec("Process")
generate = spec.task_specs['generate_data']
read = spec.task_specs['read_data']
self.assertEqual(len(generate.data_output_associations), 1)
self.assertEqual(generate.data_output_associations[0].bpmn_id, 'obj_1')
self.assertEqual(len(read.data_input_associations), 1)
self.assertEqual(read.data_input_associations[0].bpmn_id, 'obj_1')
def testSkipSubprocesses(self):
bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'call_activity_end_event.bpmn')
self.parser.add_bpmn_file(bpmn_file)
# The default is to require that call activity specs be included, so this should raise an exception
self.assertRaises(ValidationException, self.parser.get_subprocess_specs, 'Process_8200379')
# When call activity specs are skipped, no exception should be raised
subprocess_specs = self.parser.get_subprocess_specs('Process_8200379', require_call_activity_specs=False)
self.assertDictEqual(subprocess_specs, {'Call_Activity_Get_Data': None})
def testInvalidProcessID(self):
bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'call_activity_end_event.bpmn')
self.parser.add_bpmn_file(bpmn_file)
self.assertRaisesRegex(
ValidationException, "The process '\w+' was not found*",
self.parser.get_spec, "Process")
def testBoundaryEvent(self):
bpmn_file = os.path.join(os.path.dirname(__file__), 'data', 'boundary_event_split.bpmn')
self.parser.add_bpmn_file(bpmn_file)
spec = self.parser.get_spec('Process_0ymnx41')
gw1 = spec.task_specs.get('gw_1')
gw2 = spec.task_specs.get('gw_2')
task = spec.task_specs.get('task_2')
split_task = spec.task_specs.get(f'{task.name}.BoundaryEventSplit')
self.assertNotIn(task, gw1.outputs)
self.assertIn(split_task, gw1.outputs)
self.assertNotIn(task, gw2.outputs)
self.assertIn(split_task, gw2.outputs)
def testNonExecutableProcessRaisesException(self):
bpmn_file = os.path.join(os.path.dirname(__file__), 'data/Invalid-Workflows', 'non-executable-process.bpmn')
self.parser.add_bpmn_file(bpmn_file)
self.assertRaisesRegex(
ValidationException, "Process \w+ is not executable.",
self.parser.get_spec, 'Process_14di7kj'
)
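

# Hedged illustration (not part of the original test file): passing
# require_call_activity_specs=False lets a process parse even when its call
# activity targets have not been loaded yet; missing specs come back as None.
# `_example_specs_without_call_activities` is a hypothetical helper.
def _example_specs_without_call_activities(parser, process_id):
    return parser.get_subprocess_specs(process_id, require_call_activity_specs=False)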
| 3,190 | Python | .py | 56 | 48.660714 | 116 | 0.686318 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
893 | ProcessDependencyTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ProcessDependencyTest.py | # -*- coding: utf-8 -*-
import os
from SpiffWorkflow.camunda.parser import CamundaParser
from SpiffWorkflow.spiff.parser import SpiffBpmnParser
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'danfunk'
class ProcessDependencyTest(BpmnWorkflowTestCase):
"""
Assure we can determine all of the call activities and DMN references that
will be required by a parser, prior to calling its parse method.
Because DMN references vary between Camunda and Spiff, need to test that
both methods will work.
"""
def testCamundaParser(self):
self.actual_test(CamundaParser())
def testSpiffParser(self):
self.actual_test(SpiffBpmnParser())
def actual_test(self, parser):
# We ought to test the parsers in the packages they belong to, not here.
filename = 'call_activity_nested'
base_dir = os.path.join(os.path.dirname(__file__), 'data', filename)
parser.add_bpmn_file(os.path.join(base_dir, 'call_activity_nested.bpmn'))
dependencies = parser.get_dependencies()
self.assertEqual(3, len(dependencies))
process_deps = parser.get_process_dependencies()
self.assertEqual(2, len(process_deps))
self.assertIn('Level2', process_deps)
self.assertIn('Level2b', process_deps)
dmn_deps = parser.get_dmn_dependencies()
self.assertEqual(1, len(dmn_deps))
self.assertIn('Level2c', dmn_deps)
# Add Level 2 file, and we should find a level 3 dependency as well.
parser.add_bpmn_file(os.path.join(base_dir, 'call_activity_level_2.bpmn'))
dependencies = parser.get_dependencies()
self.assertEqual(4, len(dependencies))
self.assertIn('Level3', dependencies)
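

# Hedged illustration (not part of the original test file): dependency
# discovery happens before parsing, so a caller can load referenced files
# iteratively, as the test above does with call_activity_level_2.bpmn.
# `_example_collect_dependencies` is a hypothetical helper.
def _example_collect_dependencies(parser, bpmn_path):
    parser.add_bpmn_file(bpmn_path)
    return {
        'processes': parser.get_process_dependencies(),
        'dmn': parser.get_dmn_dependencies(),
    }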
| 1,749 | Python | .py | 36 | 41.861111 | 82 | 0.705226 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
894 | InvalidWorkflowsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py | # -*- coding: utf-8 -*-
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class InvalidWorkflowsTest(BpmnWorkflowTestCase):
def testNoStartEvent(self):
try:
self.load_workflow_spec(
'Invalid-Workflows/No-Start-Event.bpmn20.xml', 'sid-669ddebf-4196-41ee-8b04-bcc90bc5f983')
self.fail("self.load_workflow_spec('Invalid-Workflows/No-Start-Event.bpmn20.xml', 'No Start Event') should fail.")
except ValidationException as ex:
self.assertTrue('No start event found' in ('%r' % ex),
'\'No start event found\' should be a substring of error message: \'%r\'' % ex)
self.assertTrue('No-Start-Event.bpmn20.xml' in ex.file_name,
'\'No-Start-Event.bpmn20.xml\' should be a substring of error message: \'%r\'' % ex)
def testCallActivityNotFound(self):
with self.assertRaises(ValidationException) as exc:
self.load_workflow_spec('Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml', 'Subprocess Not Found')
self.assertIn("The process 'Missing subprocess' was not found.", str(exc))
self.assertIn("bpmn:callActivity (id:sid-617B0E1F-42DB-4D40-9B4C-ED631BF6E43A)", str(exc))
self.assertIn("Invalid-Workflows/Subprocess-Not-Found.bpmn20.xml", str(exc))
def testUnsupportedTask(self):
try:
self.load_workflow_spec('Invalid-Workflows/Unsupported-Task.bpmn20.xml', 'sid-00c10a31-5eb4-4f6c-a3eb-3664035ca9a7')
self.fail("self.load_workflow_spec('Invalid-Workflows/Unsupported-Task.bpmn20.xml', 'Unsupported Task') should fail.")
except ValidationException as ex:
self.assertTrue(
'There is no support implemented for this task type' in ( '%r' % ex),
'\'There is no support implemented for this task type\' should be a substring of error message: \'%r\'' % ex)
self.assertTrue('Unsupported-Task.bpmn20.xml' in ex.file_name,
'\'Unsupported-Task.bpmn20.xml\' should be a substring of error message: \'%r\'' % ex)
self.assertTrue('businessRuleTask' in ex.tag,
'\'businessRuleTask\' should be a substring of the tag: \'%r\'' % ex)
self.assertTrue('Business Rule Task' in ex.name,
'\'Business Rule Task\' should be the name: \'%s\'' % ex.name)
| 2,535 | Python | .py | 35 | 59.571429 | 130 | 0.644605 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
895 | ApprovalsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ApprovalsTest.py | # -*- coding: utf-8 -*-
import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ApprovalsTest(BpmnWorkflowTestCase):
def setUp(self):
spec, subprocesses = self.load_workflow_spec('Approvals.bpmn', 'Approvals')
# Start (StartTask:0xb6b4204cL)
# --> Approvals.First_Approval_Wins (CallActivity)
# --> Start (StartTask:0xb6b4266cL)
# | --> First_Approval_Wins.Supervisor_Approval (ManualTask)
# | | --> First_Approval_Wins.Supervisor_Approved (EndEvent)
# | | --> First_Approval_Wins.EndJoin (EndJoin)
# | | --> End (Simple)
# | --> First_Approval_Wins.Manager_Approval (ManualTask)
# | --> First_Approval_Wins.Manager_Approved (EndEvent)
# | --> [shown earlier] First_Approval_Wins.EndJoin (EndJoin)
# --> Approvals.First_Approval_Wins_Done (ManualTask)
# --> Approvals.Gateway4 (ParallelGateway)
# --> Approvals.Manager_Approval__P_ (ManualTask)
# | --> Approvals.Gateway5 (ParallelGateway)
# | --> Approvals.Parallel_Approvals_Done (ManualTask)
# | --> Approvals.Parallel_SP (CallActivity)
# | --> Start (StartTask)
# | | --> Parallel_Approvals_SP.Step1 (ManualTask)
# | | | --> Parallel_Approvals_SP.Supervisor_Approval (ManualTask)
# | | | --> Parallel_Approvals_SP.End2 (EndEvent)
# | | | --> Parallel_Approvals_SP.EndJoin (EndJoin)
# | | | --> End (Simple)
# | | --> Parallel_Approvals_SP.Manager_Approval (ManualTask)
# | | --> [shown earlier] Parallel_Approvals_SP.End2 (EndEvent)
# | --> Approvals.Parallel_SP_Done (ManualTask)
# | --> Approvals.End1 (EndEvent)
# | --> Approvals.EndJoin (EndJoin)
# | --> End (Simple)
# --> Approvals.Supervisor_Approval__P_ (ManualTask)
# --> [shown earlier] Approvals.Gateway5 (ParallelGateway)
self.workflow = BpmnWorkflow(spec, subprocesses)
def testRunThroughHappy(self):
self.do_next_named_step('First_Approval_Wins.Manager_Approval')
self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done')
self.do_next_named_step('Approvals.Manager_Approval__P_')
self.do_next_named_step('Approvals.Supervisor_Approval__P_')
self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done')
self.do_next_named_step('Parallel_Approvals_SP.Step1')
self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval')
self.do_next_named_step('Parallel_Approvals_SP.Supervisor_Approval')
self.do_next_exclusive_step('Approvals.Parallel_SP_Done')
def testRunThroughHappyOtherOrders(self):
self.do_next_named_step('First_Approval_Wins.Supervisor_Approval')
self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done')
self.do_next_named_step('Approvals.Supervisor_Approval__P_')
self.do_next_named_step('Approvals.Manager_Approval__P_')
self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done')
self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval')
self.do_next_named_step('Parallel_Approvals_SP.Step1')
self.do_next_named_step('Parallel_Approvals_SP.Supervisor_Approval')
self.do_next_exclusive_step('Approvals.Parallel_SP_Done')
def testSaveRestore(self):
self.do_next_named_step('First_Approval_Wins.Manager_Approval')
self.save_restore()
self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done')
self.save_restore()
self.do_next_named_step('Approvals.Supervisor_Approval__P_')
self.do_next_named_step('Approvals.Manager_Approval__P_')
self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done')
self.save_restore()
self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval')
self.do_next_exclusive_step('Parallel_Approvals_SP.Step1')
self.do_next_exclusive_step('Parallel_Approvals_SP.Supervisor_Approval')
self.do_next_exclusive_step('Approvals.Parallel_SP_Done')
def testSaveRestoreWaiting(self):
self.do_next_named_step('First_Approval_Wins.Manager_Approval')
self.save_restore()
self.do_next_exclusive_step('Approvals.First_Approval_Wins_Done')
self.save_restore()
self.do_next_named_step('Approvals.Supervisor_Approval__P_')
self.save_restore()
self.do_next_named_step('Approvals.Manager_Approval__P_')
self.save_restore()
self.do_next_exclusive_step('Approvals.Parallel_Approvals_Done')
self.save_restore()
self.do_next_named_step('Parallel_Approvals_SP.Manager_Approval')
self.save_restore()
self.do_next_exclusive_step('Parallel_Approvals_SP.Step1')
self.save_restore()
self.do_next_exclusive_step('Parallel_Approvals_SP.Supervisor_Approval')
self.save_restore()
self.do_next_exclusive_step('Approvals.Parallel_SP_Done')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ApprovalsTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 6,348 | Python | .py | 94 | 59.265957 | 133 | 0.554004 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
896 | PythonScriptEngineTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/PythonScriptEngineTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine
from SpiffWorkflow.bpmn.exceptions import WorkflowTaskException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'danfunk'
class PythonScriptEngineTest(BpmnWorkflowTestCase):
def setUp(self):
self.expressionEngine = PythonScriptEngine()
spec, subprocesses = self.load_workflow_spec('ScriptTest.bpmn', 'Process_1l85e0n')
        self.workflow = BpmnWorkflow(spec, subprocesses)
def testRunThroughHappy(self):
self.workflow.do_engine_steps()
data = self.workflow.last_task.data
self.assertEqual(data,{'testvar': {'a': 1, 'b': 2, 'new': 'Test'},
'testvar2': [{'x': 1, 'y': 'a'},
{'x': 2, 'y': 'b'},
{'x': 3, 'y': 'c'}],
'sample': ['b', 'c']})
def testNoDataPollution(self):
"""Ran into an issue where data from one run of a workflow could
bleed into a separate execution. It will think a variable is there
when it should not be there"""
startTask = self.workflow.get_tasks(state=TaskState.READY)[0]
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertTrue("testvar" in self.workflow.last_task.data)
self.assertFalse("testvar" in startTask.data)
        # StartTask doesn't know about testvar; it ran earlier.
        # Calling an exec that references testvar, in the context of the
        # start task, should fail.
with self.assertRaises(WorkflowTaskException):
self.workflow.script_engine.evaluate(startTask, 'testvar == True')
def testFunctionsAndGlobalsAreRemoved(self):
self.workflow.do_engine_steps()
task = self.workflow.last_task
self.assertIn('testvar', task.data)
self.assertIn('testvar2', task.data)
self.assertIn('sample', task.data)
self.assertNotIn('my_function', task.data)
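

# Hedged illustration (not part of the original test file): expressions are
# evaluated against a specific task's data, so a name set later in the
# workflow is not visible from an earlier task (see testNoDataPollution).
# `_example_evaluate_in_task_context` is a hypothetical helper.
def _example_evaluate_in_task_context(engine, task, expression):
    return engine.evaluate(task, expression)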
| 2,136 | Python | .py | 40 | 42.9 | 90 | 0.64813 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
897 | BpmnWorkflowTestCase.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py | import json
import os
import unittest
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn.parser import BpmnValidator
from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
from .BpmnLoaderForTests import TestBpmnParser, SERIALIZER_CONFIG
__author__ = 'matth'
registry = BpmnWorkflowSerializer.configure(SERIALIZER_CONFIG)
class BpmnWorkflowTestCase(unittest.TestCase):
serializer = BpmnWorkflowSerializer(registry)
def get_parser(self, filename, validate=True):
f = os.path.join(os.path.dirname(__file__), 'data', filename)
validator = BpmnValidator() if validate else None
parser = TestBpmnParser(validator=validator)
parser.add_bpmn_files_by_glob(f)
return parser
def load_workflow_spec(self, filename, process_name, validate=True):
parser = self.get_parser(filename, validate)
top_level_spec = parser.get_spec(process_name)
subprocesses = parser.get_subprocess_specs(process_name)
return top_level_spec, subprocesses
def load_collaboration(self, filename, collaboration_name):
parser = self.get_parser(filename)
return parser.get_collaboration(collaboration_name)
def get_all_specs(self, filename):
parser = self.get_parser(filename)
return parser.find_all_specs()
def get_ready_user_tasks(self, lane=None):
return self.workflow.get_tasks(state=TaskState.READY, manual=True, lane=lane)
def run_until_input_required(self):
task = self.workflow.get_next_task(state=TaskState.READY, manual=False)
while task is not None:
task.run()
task = self.workflow.get_next_task(state=TaskState.READY, manual=False)
def do_next_exclusive_step(self, step_name, with_save_load=False, set_attribs=None, choice=None):
if with_save_load:
self.save_restore_all()
self.workflow.do_engine_steps()
tasks = self.workflow.get_tasks(state=TaskState.READY)
self._do_single_step(step_name, tasks, set_attribs, choice)
def do_next_named_step(self, step_name, with_save_load=False, set_attribs=None, choice=None, only_one_instance=True):
if with_save_load:
self.save_restore()
self.workflow.do_engine_steps()
step_name_path = step_name.split("|")
def switch_workflow(p):
for task_id, sp in p.workflow._get_outermost_workflow().subprocesses.items():
if p in sp.get_tasks(workflow=sp):
return p.workflow.get_task_from_id(task_id)
def is_match(t):
if not (t.task_spec.name == step_name_path[-1] or t.task_spec.bpmn_name == step_name_path[-1]):
return False
for parent_name in step_name_path[:-1]:
p = t.parent
found = False
while (p and p != p.parent):
if (p.task_spec.name == parent_name or p.task_spec.bpmn_name == parent_name):
found = True
break
if p.parent is None and p.workflow != p.workflow.parent:
p = switch_workflow(p)
else:
p = p.parent
if not found:
return False
return True
tasks = [t for t in self.workflow.get_tasks(state=TaskState.READY) if is_match(t)]
self._do_single_step(
step_name_path[-1], tasks, set_attribs, choice, only_one_instance=only_one_instance)
def assertTaskNotReady(self, step_name):
tasks = list([t for t in self.workflow.get_tasks(state=TaskState.READY)
if t.task_spec.name == step_name or t.task_spec.bpmn_name == step_name])
self.assertEqual([], tasks)
def _do_single_step(self, step_name, tasks, set_attribs=None, choice=None, only_one_instance=True):
if only_one_instance:
self.assertEqual(
len(tasks), 1, 'Did not find one task for \'%s\' (got %d)' % (step_name, len(tasks)))
else:
self.assertNotEqual(
len(tasks), 0, 'Did not find any tasks for \'%s\'' % (step_name))
        self.assertTrue(
            tasks[0].task_spec.name == step_name or tasks[0].task_spec.bpmn_name == step_name,
            'Expected step %s, got %s (%s)' % (step_name, tasks[0].task_spec.bpmn_name, tasks[0].task_spec.name))
if not set_attribs:
set_attribs = {}
if choice:
set_attribs['choice'] = choice
if set_attribs:
tasks[0].set_data(**set_attribs)
tasks[0].run()
def save_restore(self):
script_engine = self.workflow.script_engine
before_state = self._get_workflow_state(do_steps=False)
before_dump = self.workflow.get_dump()
        # Check that we can actually convert this to JSON
json_str = json.dumps(before_state)
after = self.serializer.from_dict(json.loads(json_str))
# Check that serializing and deserializing results in the same workflow
after_state = self.serializer.to_dict(after)
after_dump = after.get_dump()
self.maxDiff = None
self.assertEqual(before_dump, after_dump)
self.assertEqual(before_state, after_state)
self.workflow = after
self.workflow.script_engine = script_engine
def restore(self, state):
self.workflow = self.serializer.workflow_from_dict(state)
def _get_workflow_state(self, do_steps=True):
if do_steps:
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
return self.serializer.to_dict(self.workflow)
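

# Hedged illustration (not part of the original test file): the serialization
# round trip used by save_restore above, reduced to its essentials. A workflow
# is dumped to a dict, forced through JSON, and rebuilt from the result.
# `_example_round_trip` is a hypothetical helper.
def _example_round_trip(serializer, workflow):
    state = json.dumps(serializer.to_dict(workflow))
    return serializer.from_dict(json.loads(state))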
| 5,734 | Python | .py | 114 | 39.587719 | 124 | 0.629815 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
898 | DataObjectTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/DataObjectTest.py | from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.bpmn.exceptions import WorkflowDataException
class DataObjectReferenceTest(BpmnWorkflowTestCase):
def setUp(self):
self.spec, self.subprocesses = self.load_workflow_spec('data_object.bpmn', 'Process')
def testDataObjectReferences(self):
self.actual_test(False)
def testDataObjectSerialization(self):
self.actual_test(True)
def testMissingDataInput(self):
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
self.workflow.do_engine_steps()
# Add the data so that we can advance the workflow
ready_tasks = self.get_ready_user_tasks()
ready_tasks[0].data = { 'obj_1': 'hello' }
ready_tasks[0].run()
# Remove the data before advancing
ready_tasks = self.get_ready_user_tasks()
self.workflow.data_objects.pop('obj_1')
with self.assertRaises(WorkflowDataException) as exc:
ready_tasks[0].run()
        self.assertEqual(exc.exception.data_output.name, 'obj_1')
def testMissingDataOutput(self):
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
self.workflow.do_engine_steps()
ready_tasks = self.get_ready_user_tasks()
with self.assertRaises(WorkflowDataException) as exc:
ready_tasks[0].run()
        self.assertEqual(exc.exception.data_output.name, 'obj_1')
def actual_test(self, save_restore):
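        """Step through the workflow, checking that obj_1 is moved between task
        data and the workflow's data_objects at each stage."""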
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
self.workflow.do_engine_steps()
# Set up the data
ready_tasks = self.get_ready_user_tasks()
ready_tasks[0].data = { 'obj_1': 'hello' }
ready_tasks[0].run()
# After task completion, obj_1 should be copied out of the task into the workflow
self.assertNotIn('obj_1', ready_tasks[0].data)
self.assertIn('obj_1', self.workflow.data_objects)
if save_restore:
self.save_restore()
# Set a value for obj_1 in the task data again
ready_tasks = self.get_ready_user_tasks()
ready_tasks[0].data = { 'obj_1': 'hello again' }
ready_tasks[0].run()
# Check to make sure we use the workflow value instead of the value we set
ready_tasks = self.get_ready_user_tasks()
self.assertEqual(ready_tasks[0].data['obj_1'], 'hello')
# Modify the value in the task
ready_tasks[0].data = { 'obj_1': 'hello again' }
ready_tasks[0].run()
# We did not set an output data reference so obj_1 should remain unchanged in the workflow data
# and be removed from the task data
self.assertNotIn('obj_1', ready_tasks[0].data)
self.assertEqual(self.workflow.data_objects['obj_1'], 'hello')
if save_restore:
self.save_restore()
# Make sure data objects are accessible inside a subprocess
self.workflow.do_engine_steps()
ready_tasks = self.get_ready_user_tasks()
self.assertEqual(ready_tasks[0].data['obj_1'], 'hello')
ready_tasks[0].data['obj_1'] = 'hello again'
ready_tasks[0].run()
self.workflow.do_engine_steps()
sp = self.workflow.get_next_task(spec_name='subprocess')
        # The data object was copied out of the task data when the subprocess completed
self.assertNotIn('obj_1', sp.data)
# The update should persist in the main process
self.assertEqual(self.workflow.data_objects['obj_1'], 'hello again')
class DataObjectGatewayTest(BpmnWorkflowTestCase):
def setUp(self):
spec, subprocesses = self.load_workflow_spec('data_object_gateway.bpmn', 'main')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.workflow.do_engine_steps()
def testExpression(self):
task = self.get_ready_user_tasks()[0]
# Set the data object
task.data = {'val': True}
task.run()
# The gateway depends on the value of the data object
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
completed = [task.task_spec.name for task in self.workflow.get_tasks()]
self.assertIn('yes', completed)
self.assertNotIn('no', completed)
# The data object was removed by the script engine
self.assertNotIn('val', self.workflow.last_task.data)
| 4,362 | Python | .py | 88 | 40.818182 | 103 | 0.660311 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
899 | SequentialMultiInstanceTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/SequentialMultiInstanceTest.py | from SpiffWorkflow.bpmn.exceptions import WorkflowDataException
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.bpmn.specs.data_spec import TaskDataReference
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class BaseTestCase(BpmnWorkflowTestCase):
def set_io_and_run_workflow(self, data, data_input=None, data_output=None, save_restore=False):
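        """Run the workflow with the given initial data, wiring the
        multi-instance task's data input/output to the named variables and
        checking its task_info bookkeeping; assumes three child instances."""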
start = self.workflow.task_tree
start.data = data
any_task = self.workflow.get_next_task(spec_name='any_task')
any_task.task_spec.data_input = TaskDataReference(data_input) if data_input is not None else None
any_task.task_spec.data_output = TaskDataReference(data_output) if data_output is not None else None
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
task_info = any_task.task_spec.task_info(any_task)
self.assertEqual(len(task_info['completed']), 0)
self.assertEqual(len(task_info['running']), 1)
self.assertEqual(len(task_info['future']), 2)
self.assertEqual(len(task_info['instance_map']), 1)
ready_tasks = self.get_ready_user_tasks()
while len(ready_tasks) > 0:
self.assertEqual(len(ready_tasks), 1)
task = ready_tasks[0]
self.assertEqual(task.task_spec.name, 'any_task [child]')
self.assertIn('input_item', task.data)
task.data['output_item'] = task.data['input_item'] * 2
task.run()
if save_restore:
self.save_restore()
ready_tasks = self.get_ready_user_tasks()
self.workflow.do_engine_steps()
any_task = self.workflow.get_next_task(spec_name='any_task')
task_info = any_task.task_spec.task_info(any_task)
self.assertEqual(len(task_info['completed']), 3)
self.assertEqual(len(task_info['running']), 0)
self.assertEqual(len(task_info['future']), 0)
self.assertEqual(len(task_info['instance_map']), 3)
children = self.workflow.get_tasks(spec_name='any_task [child]')
for child in children:
info = child.task_spec.task_info(child)
instance = info['instance']
self.assertEqual(task_info['instance_map'][instance], str(child.id))
self.assertEqual(len(children), 3)
self.assertTrue(self.workflow.completed)
def run_workflow_with_condition(self, data, condition):
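        """Run the workflow with a completion condition on the multi-instance
        task; iteration stops once the condition holds, leaving two completed
        child instances."""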
start = self.workflow.task_tree
start.data = data
task = self.workflow.get_next_task(spec_name='any_task')
task.task_spec.condition = condition
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
ready_tasks = self.get_ready_user_tasks()
while len(ready_tasks) > 0:
ready = ready_tasks[0]
self.assertEqual(ready.task_spec.name, 'any_task [child]')
self.assertIn('input_item', ready.data)
ready.data['output_item'] = ready.data['input_item'] * 2
ready.run()
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
ready_tasks = self.get_ready_user_tasks()
self.workflow.do_engine_steps()
children = self.workflow.get_tasks(spec_name='any_task [child]')
self.assertEqual(len(children), 2)
self.assertTrue(self.workflow.completed)
class SequentialMultiInstanceExistingOutputTest(BaseTestCase):
def setUp(self):
self.spec, subprocess = self.load_workflow_spec('sequential_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testListWithDictOutput(self):
data = {
'input_data': [1, 2, 3],
'output_data': {},
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': {0: 2, 1: 4, 2: 6},
})
def testDictWithListOutput(self):
data = {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': [],
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': [2, 4, 6],
})
def testNonEmptyOutput(self):
with self.assertRaises(WorkflowDataException) as exc:
data = {
'input_data': [1, 2, 3],
'output_data': [1, 2, 3],
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertEqual(exc.exception.message,
"If the input is not being updated in place, the output must be empty or it must be a map (dict)")
def testInvalidOutputType(self):
with self.assertRaises(WorkflowDataException) as exc:
data = {
'input_data': set([1, 2, 3]),
'output_data': set(),
}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertEqual(exc.exception.message, "Only a mutable map (dict) or sequence (list) can be used for output")
class SequentialMultiInstanceNewOutputTest(BaseTestCase):
def setUp(self):
self.spec, subprocess = self.load_workflow_spec('sequential_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testList(self):
data = {'input_data': [1, 2, 3]}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': [2, 4, 6]
})
def testListSaveRestore(self):
data = {'input_data': [1, 2, 3]}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data', save_restore=True)
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': [2, 4, 6]
})
def testDict(self):
data = {'input_data': {'a': 1, 'b': 2, 'c': 3} }
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': {'a': 2, 'b': 4, 'c': 6}
})
def testDictSaveRestore(self):
data = {'input_data': {'a': 1, 'b': 2, 'c': 3} }
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data', save_restore=True)
self.assertDictEqual(self.workflow.data, {
'input_data': {'a': 1, 'b': 2, 'c': 3},
'output_data': {'a': 2, 'b': 4, 'c': 6}
})
def testSet(self):
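        # Sets are accepted as input collections; the new output is a list.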
data = {'input_data': set([1, 2, 3])}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='output_data')
self.assertDictEqual(self.workflow.data, {
'input_data': set([1, 2, 3]),
'output_data': [2, 4, 6]
})
def testEmptyCollection(self):
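        # An empty input collection completes the task immediately with an
        # empty output and no child instances.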
start = self.workflow.task_tree
start.data = {'input_data': []}
self.workflow.do_engine_steps()
self.assertTrue(self.workflow.completed)
self.assertDictEqual(self.workflow.data, {'input_data': [], 'output_data': []})
def testCondition(self):
self.run_workflow_with_condition({'input_data': [1, 2, 3]}, "input_item == 2")
self.assertDictEqual(self.workflow.data, {
'input_data': [1, 2, 3],
'output_data': [2, 4]
})
class SequentialMultiInstanceUpdateInputTest(BaseTestCase):
def setUp(self):
self.spec, subprocess = self.load_workflow_spec('sequential_multiinstance_loop_input.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testList(self):
data = { 'input_data': [1, 2, 3]}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='input_data')
self.assertDictEqual(self.workflow.data, {'input_data': [2, 4, 6]})
def testDict(self):
data = { 'input_data': {'a': 1, 'b': 2, 'c': 3}}
self.set_io_and_run_workflow(data, data_input='input_data', data_output='input_data')
self.assertDictEqual(self.workflow.data, {'input_data': {'a': 2, 'b': 4, 'c': 6}})
class SequentialMultiInstanceWithCardinality(BaseTestCase):
    def setUp(self):
self.spec, subprocess = self.load_workflow_spec('sequential_multiinstance_cardinality.bpmn', 'main')
self.workflow = BpmnWorkflow(self.spec)
def testCardinality(self):
self.set_io_and_run_workflow({}, data_output='output_data')
self.assertDictEqual(self.workflow.data, {'output_data': [0, 2, 4]})
def testCardinalitySaveRestore(self):
self.set_io_and_run_workflow({}, data_output='output_data', save_restore=True)
self.assertDictEqual(self.workflow.data, {'output_data': [0, 2, 4]})
def testCondition(self):
self.run_workflow_with_condition({}, "input_item == 1")
self.assertDictEqual(self.workflow.data, {
'output_data': [0, 2]
})
| 9,384 | Python | .py | 184 | 41.255435 | 122 | 0.614006 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |