id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
900 | IOSpecTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/IOSpecTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.exceptions import WorkflowDataException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class CallActivityDataTest(BpmnWorkflowTestCase):
    """Exercises BPMN ioSpecification on a call activity.

    Declared inputs are copied into the subprocess and declared outputs are
    copied back to the parent; a missing declared variable raises
    WorkflowDataException identifying the offending input/output.
    """

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('io_spec*.bpmn', 'parent')

    def testCallActivityWithIOSpec(self):
        self.actual_test()

    def testCallActivityWithIOSpecSaveRestore(self):
        self.actual_test(True)

    def testCallActivityMissingInput(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        # Deliberately omit in_2, which the call activity declares as an input.
        self.workflow.spec.task_specs['Activity_0haob58'].script = """in_1, unused = 1, True"""
        with self.assertRaises(WorkflowDataException) as exc:
            self.advance_to_subprocess()
        self.assertEqual(exc.exception.data_input.bpmn_id, 'in_2')

    def testCallActivityMissingOutput(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.workflow.spec.task_specs['Activity_0haob58'].script = """in_1, in_2, unused = 1, "hello world", True"""
        self.advance_to_subprocess()
        first_subprocess_task = self.workflow.get_tasks(state=TaskState.READY)[0]
        # Deliberately omit out_2, which the call activity declares as an output.
        inner_spec = first_subprocess_task.workflow.spec.task_specs['Activity_04d94ee']
        inner_spec.script = """out_1, unused = in_1 * 2, False"""
        with self.assertRaises(WorkflowDataException) as exc:
            self.complete_subprocess()
        self.assertEqual(exc.exception.data_output.bpmn_id, 'out_2')

    def actual_test(self, save_restore=False):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.workflow.spec.task_specs['Activity_0haob58'].script = """in_1, in_2, unused = 1, "hello world", True"""
        if save_restore:
            self.save_restore()
        self.advance_to_subprocess()
        # First task of the subprocess: declared inputs copied, others not.
        first_subprocess_task = self.workflow.get_tasks(state=TaskState.READY)[0]
        self.assertIn('in_1', first_subprocess_task.data)
        self.assertIn('in_2', first_subprocess_task.data)
        self.assertNotIn('unused', first_subprocess_task.data)
        self.complete_subprocess()
        # Back in the parent: originals unchanged, declared outputs added.
        parent_task = self.workflow.get_next_task(spec_name='Activity_1wdjypm')
        self.assertEqual(parent_task.data['in_1'], 1)
        self.assertEqual(parent_task.data['in_2'], "hello world")
        self.assertEqual(parent_task.data['unused'], True)
        self.assertEqual(parent_task.data['out_1'], 2)
        self.assertEqual(parent_task.data['out_2'], "HELLO WORLD")

    def advance_to_subprocess(self):
        # Run READY tasks until the subworkflow becomes a STARTED (waiting) task.
        while not self.workflow.get_tasks(state=TaskState.STARTED):
            self.workflow.get_next_task(state=TaskState.READY).run()

    def complete_subprocess(self):
        # Drain READY tasks in the subprocess until none remain.
        while True:
            ready = self.workflow.get_tasks(state=TaskState.READY)
            if not ready:
                break
            ready[0].run()
class IOSpecOnTaskTest(BpmnWorkflowTestCase):
    """Checks an ioSpecification declared directly on a task: declared inputs
    are filtered into the task, declared outputs filtered back out, and a
    missing declared variable raises WorkflowDataException."""

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('io_spec_on_task.bpmn', 'main')

    def testIOSpecOnTask(self):
        self.actual_test()

    def testIOSpecOnTaskSaveRestore(self):
        self.actual_test(True)

    def testIOSpecOnTaskMissingInput(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        # Omit in_2, which the task declares as a required input.
        self.workflow.spec.task_specs['set_data'].script = """in_1, unused = 1, True"""
        with self.assertRaises(WorkflowDataException) as exc:
            self.workflow.do_engine_steps()
        self.assertEqual(exc.exception.data_input.bpmn_id, 'in_2')

    def testIOSpecOnTaskMissingOutput(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.workflow.do_engine_steps()
        target = self.workflow.get_next_task(spec_name='any_task')
        # Provide out_1 but not the declared output out_2.
        target.data.update({'out_1': 1})
        with self.assertRaises(WorkflowDataException) as exc:
            target.run()
        self.assertEqual(exc.exception.data_output.bpmn_id, 'out_2')

    def actual_test(self, save_restore=False):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.workflow.do_engine_steps()
        if save_restore:
            self.save_restore()
        target = self.workflow.get_next_task(spec_name='any_task')
        # Only the declared inputs are visible inside the task.
        self.assertDictEqual(target.data, {'in_1': 1, 'in_2': 'hello world'})
        target.data.update({'out_1': 1, 'out_2': 'bye', 'extra': True})
        target.run()
        self.workflow.do_engine_steps()
        # Only the declared outputs survive; 'extra' is dropped.
        self.assertDictEqual(self.workflow.last_task.data, {'out_1': 1, 'out_2': 'bye'})
| 5,101 | Python | .py | 99 | 42.848485 | 94 | 0.670688 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
901 | BpmnLoaderForTests.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py | from copy import deepcopy
from SpiffWorkflow.bpmn.specs.data_spec import BpmnDataStoreSpecification
from SpiffWorkflow.bpmn.specs.defaults import ExclusiveGateway, UserTask
from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser
from SpiffWorkflow.bpmn.parser.TaskParser import TaskParser
from SpiffWorkflow.bpmn.parser.task_parsers import ConditionalGatewayParser
from SpiffWorkflow.bpmn.parser.util import full_tag
from SpiffWorkflow.bpmn.serializer.helpers.bpmn_converter import BpmnConverter
from SpiffWorkflow.bpmn.serializer.default.task_spec import BpmnTaskSpecConverter
from SpiffWorkflow.bpmn.serializer import DEFAULT_CONFIG
__author__ = 'matth'
# One glorious day I will be able to remove these classes.
class TestUserTask(UserTask):
    """User task variant used only by the test suite; presents the outgoing
    sequence names as user choices and records the selection in task data."""

    def get_user_choices(self):
        """Return the selectable outgoing sequence names.

        When the sole successor is an exclusive gateway the gateway's
        outgoing flows are the choices; otherwise this task's own are.
        """
        if not self.outputs:
            return []
        assert len(self.outputs) == 1
        (successor,) = self.outputs
        source = successor if isinstance(successor, ExclusiveGateway) else self
        return source.get_outgoing_sequence_names()

    def do_choice(self, task, choice):
        """Store the user's selection under 'choice' and complete the task."""
        task.set_data(choice=choice)
        task.run()
class TestExclusiveGatewayParser(ConditionalGatewayParser):
    """Gateway parser that falls back to matching a flow's name against the
    'choice' task-data variable when no explicit condition is present."""

    def parse_condition(self, sequence_flow_node):
        condition = super().parse_condition(sequence_flow_node)
        if condition is None:
            # No modeled condition: synthesize one from the flow's name.
            condition = "choice == '%s'" % sequence_flow_node.get('name', None)
        return condition
class TestDataStore(BpmnDataStoreSpecification):
    """In-memory data store used by the test suite.

    NOTE(review): the value lives in a *class* attribute, so it is shared by
    every workflow in the process — this is what lets separate workflow
    instances exchange data in the data-store tests.
    """

    # Shared storage slot for the single value this store holds.
    _value = None

    def get(self, my_task):
        """Copy a value from a data store into task data."""
        my_task.data[self.bpmn_id] = TestDataStore._value

    def set(self, my_task):
        """Copy a value from the task data to the data store"""
        TestDataStore._value = my_task.data[self.bpmn_id]
        del my_task.data[self.bpmn_id]

    def delete(self, my_task):
        # Removes the variable from task data only; the stored value remains.
        del my_task.data[self.bpmn_id]
class TestDataStoreConverter(BpmnConverter):
    """Serializer for TestDataStore that also round-trips the class-level
    shared value alongside the spec attributes."""

    def to_dict(self, spec):
        return dict(
            bpmn_id=spec.bpmn_id,
            bpmn_name=spec.bpmn_name,
            capacity=spec.capacity,
            is_unlimited=spec.is_unlimited,
            _value=TestDataStore._value,
        )

    def from_dict(self, dct):
        # Pull the stored value out before the remaining keys become
        # constructor arguments; restore it after construction.
        stored_value = dct.pop("_value")
        data_store = TestDataStore(**dct)
        TestDataStore._value = stored_value
        return data_store
class TestBpmnParser(BpmnParser):
    """Parser used throughout the test suite.

    Swaps in test-friendly user-task and exclusive-gateway parsers, registers
    the shared TestDataStore, and extends the default serializer config so the
    test classes survive save/restore round-trips.
    """

    OVERRIDE_PARSER_CLASSES = {
        full_tag('userTask'): (TaskParser, TestUserTask),
        full_tag('exclusiveGateway'): (TestExclusiveGatewayParser, ExclusiveGateway),
    }

    DATA_STORE_CLASSES = {
        "TestDataStore": TestDataStore,
    }

    # Copy the default config so the shared DEFAULT_CONFIG is not mutated.
    SERIALIZER_CONFIG = deepcopy(DEFAULT_CONFIG)
    SERIALIZER_CONFIG[TestUserTask] = BpmnTaskSpecConverter
    SERIALIZER_CONFIG[TestDataStore] = TestDataStoreConverter
| 2,880 | Python | .py | 66 | 37.015152 | 85 | 0.710856 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
902 | StandardLoopTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/StandardLoopTest.py | import os
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser, ValidationException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
class StandardLoopTest(BpmnWorkflowTestCase):
    """Tests for a BPMN standard (non-multi-instance) loop task.

    The diagram's loop task has loopMaximum = 3 and loopCondition = 'done':
    the loop ends after three iterations or once 'done' becomes truthy.
    """

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('standard_loop.bpmn','main', validate=False)
        # This spec has a loop task with loopMaximum = 3 and loopCondition = 'done'
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testLoopMaximum(self):
        """With 'done' always False, the loop runs exactly loopMaximum times."""
        start = self.workflow.get_tasks(end_at_spec='StartEvent_1')
        start[0].data['done'] = False
        any_task = self.workflow.get_next_task(spec_name='any_task')
        task_info = any_task.task_spec.task_info(any_task)
        # Before the first iteration: nothing completed, all 3 remaining.
        self.assertEqual(task_info['iterations_completed'], 0)
        self.assertEqual(task_info['iterations_remaining'], 3)
        self.assertEqual(len(task_info['instance_map']), 0)
        for idx in range(3):
            self.workflow.do_engine_steps()
            self.workflow.refresh_waiting_tasks()
            ready_tasks = self.get_ready_user_tasks()
            self.assertEqual(len(ready_tasks), 1)
            ready_tasks[0].data[str(idx)] = True
            ready_tasks[0].run()
            task_info = ready_tasks[0].task_spec.task_info(ready_tasks[0])
            self.assertEqual(task_info['iteration'], idx)
        self.workflow.do_engine_steps()
        any_task = self.workflow.get_next_task(spec_name='any_task')
        task_info = any_task.task_spec.task_info(any_task)
        # After the loop: all three iterations recorded, workflow complete.
        self.assertEqual(task_info['iterations_completed'], 3)
        self.assertEqual(task_info['iterations_remaining'], 0)
        self.assertEqual(len(task_info['instance_map']), 3)
        self.assertTrue(self.workflow.completed)

    def testLoopCondition(self):
        """Setting 'done' inside the loop ends it after a single iteration."""
        start = self.workflow.get_tasks(end_at_spec='StartEvent_1')
        start[0].data['done'] = False
        self.workflow.do_engine_steps()
        self.workflow.refresh_waiting_tasks()
        ready_tasks = self.get_ready_user_tasks()
        self.assertEqual(len(ready_tasks), 1)
        ready_tasks[0].data['done'] = True
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testSkipLoop(self):
        # This is called "skip loop" because I thought "testTestBefore" was a terrible name
        start = self.workflow.get_tasks(end_at_spec='StartEvent_1')
        start[0].data['done'] = True
        self.workflow.do_engine_steps()
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
class ParseStandardLoop(BpmnWorkflowTestCase):
    """A loop task with neither loopCondition nor loopMaximum must fail
    validation at spec-building time."""

    def testParseStandardLoop(self):
        parser = BpmnParser()
        # This process has neither a loop condition nor a loop maximum
        invalid_file = os.path.join(os.path.dirname(__file__), 'data', 'standard_loop_invalid.bpmn')
        parser.add_bpmn_file(invalid_file)
        with self.assertRaises(ValidationException):
            parser.get_spec('main')
| 3,096 | Python | .py | 59 | 43.779661 | 97 | 0.677045 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
903 | ExclusiveGatewayNoDefaultTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ExclusiveGatewayNoDefaultTest.py | import unittest
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.exceptions import WorkflowException
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'essweine'
class ExclusiveGatewayNoDefaultTest(BpmnWorkflowTestCase):
    """An exclusive gateway with no default flow: when no outgoing condition
    matches, running the engine raises WorkflowException and the gateway task
    is left in the ERROR state."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('exclusive_gateway_no_default.bpmn', 'NoDefaultGateway')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        first = self.workflow.get_next_task(end_at_spec='StartEvent_1')
        # x chosen so that no outgoing condition of the gateway matches.
        first.data = { 'x': 1 }
        self.assertRaises(WorkflowException, self.workflow.do_engine_steps)
        task = self.workflow.get_next_task(spec_name='Gateway_CheckValue')
        self.assertEqual(task.state, TaskState.ERROR)
| 955 | Python | .py | 18 | 47.666667 | 109 | 0.764516 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
904 | DataStoreReferenceTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/DataStoreReferenceTest.py | from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
class DataStoreReferenceTest(BpmnWorkflowTestCase):
    """Data store references: values written through a data store are visible
    to later tasks, and even to separate workflow instances."""

    def _do_engine_steps(self, file, processid, save_restore):
        """Load *file*/*processid*, optionally round-trip the serializer,
        and run the engine to completion.

        Bug fix: the previous version ignored both arguments and always
        loaded 'data_store.bpmn' / 'JustDataStoreRef', so the read/write
        cross-instance tests never exercised their own diagrams.
        """
        spec, subprocesses = self.load_workflow_spec(file, processid)
        self.workflow = BpmnWorkflow(spec, subprocesses)
        if save_restore:
            self.save_restore()
        self.workflow.do_engine_steps()

    def _check_last_script_task_data(self):
        # The final script task should see exactly one variable: the value
        # that was read back out of the data store.
        last_script_task_data = self.workflow.get_next_task(spec_name="Activity_1skgyn9").data
        self.assertEqual(len(last_script_task_data), 1)
        self.assertEqual(last_script_task_data["x"], "Sue")

    def testCanInterpretDataStoreReferenceWithInputsAndOutputs(self):
        self._do_engine_steps('data_store.bpmn', 'JustDataStoreRef', False)
        self._check_last_script_task_data()

    def testCanSaveRestoreDataStoreReferenceWithInputsAndOutputs(self):
        self._do_engine_steps('data_store.bpmn', 'JustDataStoreRef', True)
        self._check_last_script_task_data()

    def testSeparateWorkflowInstancesCanShareDataUsingDataStores(self):
        self._do_engine_steps('data_store_write.bpmn', 'JustDataStoreRef', False)
        self._do_engine_steps('data_store_read.bpmn', 'JustDataStoreRef', False)
        self._check_last_script_task_data()

    def testSeparateRestoredWorkflowInstancesCanShareDataUsingDataStores(self):
        self._do_engine_steps('data_store_write.bpmn', 'JustDataStoreRef', True)
        self._do_engine_steps('data_store_read.bpmn', 'JustDataStoreRef', True)
        self._check_last_script_task_data()
| 1,676 | Python | .py | 27 | 54.444444 | 94 | 0.733861 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
905 | PythonScriptEngineEnvironmentTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/PythonScriptEngineEnvironmentTest.py | import json
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine
from SpiffWorkflow.bpmn.script_engine.python_environment import BasePythonScriptEngineEnvironment, TaskDataEnvironment
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
def example_global():
    """Placeholder callable injected into the script environment by the tests
    to verify that environment globals do not leak into workflow data."""
    pass
class NonTaskDataExampleEnvironment(BasePythonScriptEngineEnvironment):
    """Script-engine environment that keeps state in one shared dict instead
    of copying results in and out of each task's data."""

    def __init__(self, environment_globals, environment):
        self.environment = environment
        self.environment.update(environment_globals)
        super().__init__(environment_globals)

    def evaluate(self, expression, context, external_methods=None):
        # Expression evaluation is not needed by these tests.
        pass

    def execute(self, script, context, external_methods=None):
        """Run *script* against the shared environment; external methods are
        injected for the duration of the call and stripped out afterwards."""
        # Bug fix: normalize once. The original applied `or {}` only to the
        # update() call but then filtered with `k not in external_methods`,
        # raising TypeError when external_methods was None.
        external_methods = external_methods or {}
        self.environment.update(context)
        self.environment.update(external_methods)
        exec(script, self.environment)
        self.environment = {k: v for k, v in self.environment.items() if k not in external_methods}
        return True

    def user_defined_values(self):
        # Everything in the environment that did not come from the globals.
        return {k: v for k, v in self.environment.items() if k not in self.globals}
class AsyncScriptEnvironment(TaskDataEnvironment):
    """Environment whose execute() returns None after running the script;
    the StartedTaskTest below shows this leaves the task in STARTED state
    until it is completed explicitly."""

    def execute(self, script, context, external_methods=None):
        super().execute(script, context, external_methods)
        # Returning None (rather than True) signals the script has not finished.
        return None
class PythonScriptEngineEnvironmentTest(BpmnWorkflowTestCase):
    """Compares task-data growth between the default script engine (which
    copies script results into every task's data) and a shared-environment
    engine (which keeps results out of task data)."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('task_data_size.bpmn', 'Process_ccz6oq2')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testTaskDataSizeWithDefaultPythonScriptEngine(self):
        self.workflow.do_engine_steps()
        self.assertIn("a", self.workflow.data)
        self.assertIn("b", self.workflow.data)
        self.assertIn("c", self.workflow.data)
        self.assertIn("d", self.workflow.data)
        task_data_len = self._get_task_data_len()
        d_uniques = set(self.workflow.data["d"])
        d_len = len(self.workflow.data["d"])
        # Default engine duplicates values into task data, so the serialized
        # task data is large.
        self.assertGreater(task_data_len, 15000)
        self.assertEqual(d_len, 512*3)
        self.assertEqual(d_uniques, {"a", "b", "c"})

    def testTaskDataSizeWithNonTaskDataEnvironmentBasedPythonScriptEngine(self):
        script_engine_environment = NonTaskDataExampleEnvironment({"example_global": example_global}, {})
        script_engine = PythonScriptEngine(environment=script_engine_environment)
        self.workflow.script_engine = script_engine
        self.workflow.do_engine_steps()
        # Results live in the shared environment; copy user-defined ones over.
        self.workflow.data.update(script_engine.environment.user_defined_values())
        self.assertIn("a", self.workflow.data)
        self.assertIn("b", self.workflow.data)
        self.assertIn("c", self.workflow.data)
        self.assertIn("d", self.workflow.data)
        # Injected globals must not leak into workflow data.
        self.assertNotIn("example_global", self.workflow.data)
        task_data_len = self._get_task_data_len()
        d_uniques = set(self.workflow.data["d"])
        d_len = len(self.workflow.data["d"])
        # Task data stays minimal because results never enter task data.
        self.assertEqual(task_data_len, 2)
        self.assertEqual(d_len, 512*3)
        self.assertEqual(d_uniques, {"a", "b", "c"})

    def _get_task_data_len(self):
        # Size of the JSON dump of every finished task's non-empty data dict.
        tasks_to_check = self.workflow.get_tasks(state=TaskState.FINISHED_MASK)
        task_data = [task.data for task in tasks_to_check]
        task_data_to_check = list(filter(len, task_data))
        task_data_len = len(json.dumps(task_data_to_check))
        return task_data_len
class StartedTaskTest(BpmnWorkflowTestCase):
    """A script task whose environment's execute() returns None stays in the
    STARTED state until it is completed explicitly."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('script-start.bpmn', 'Process_cozt5fu')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testStartedState(self):
        # AsyncScriptEnvironment.execute returns None -> "not finished yet".
        engine = PythonScriptEngine(environment=AsyncScriptEnvironment())
        self.workflow.script_engine = engine
        self.workflow.do_engine_steps()
        script_task = self.workflow.get_next_task(spec_name='script')
        self.assertEqual(script_task.state, TaskState.STARTED)
        # Explicit completion releases the task; the rest of the process runs.
        script_task.complete()
        self.workflow.get_next_task(spec_name='manual').run()
        self.workflow.do_engine_steps()
        end_task = self.workflow.get_next_task(spec_name='End')
        self.assertDictEqual(end_task.data, {'x': 1, 'y': 2, 'z': 3})
        self.assertTrue(self.workflow.completed)
| 4,429 | Python | .py | 84 | 45.011905 | 118 | 0.705651 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
906 | NestedProcessesTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/NestedProcessesTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'neilc'
class NestedProcessesTest(BpmnWorkflowTestCase):
    """Nested subprocess workflows: normal completion, plus resetting to
    tasks at various nesting depths and re-running to completion."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Nested*.bpmn20.xml',
            'sid-a12cf1e5-86f4-4d69-9790-6a90342f5963')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        self.complete_task('Action1', True)
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.complete_task('Action2', True)
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.complete_task('Action3', True)
        self.assertTrue(self.workflow.completed)

    def testResetToTop(self):
        """Resetting to the top-level first task discards all subprocesses."""
        self.complete_task('Action1', True)
        self.complete_task('Action2', True)
        self.complete_task('Action3', True)
        task = [t for t in self.workflow.get_tasks() if t.task_spec.bpmn_name == 'Action1'][0]
        self.workflow.reset_from_task_id(task.id)
        self.assertEqual(task.state, TaskState.READY)
        self.assertEqual(len(self.workflow.subprocesses), 0)
        task.run()
        self.complete_task('Action2')
        self.complete_task('Action3')
        self.assertTrue(self.workflow.completed)

    def testResetToIntermediate(self):
        """Resetting to a task one level down keeps its enclosing subprocess."""
        self.complete_task('Action1', True)
        self.complete_task('Action2', True)
        self.complete_task('Action3', True)
        task = [t for t in self.workflow.get_tasks() if t.task_spec.bpmn_name == 'Action2'][0]
        sub = [t for t in self.workflow.get_tasks() if t.task_spec.bpmn_name == 'Nested level 1'][0]
        self.workflow.reset_from_task_id(task.id)
        self.assertEqual(task.state, TaskState.READY)
        self.assertEqual(sub.state, TaskState.STARTED)
        self.assertEqual(len(self.workflow.subprocesses), 1)
        task.run()
        self.complete_task('Action3')
        self.assertTrue(self.workflow.completed)

    def testResetToSubworkflow(self):
        """Resetting to the subprocess task itself restarts that subprocess."""
        self.complete_task('Action1', True)
        self.complete_task('Action2', True)
        self.complete_task('Action3', True)
        # "Nested level 1"
        task = self.workflow.get_next_task(spec_name='sid-C014B4B9-889F-4EE9-9949-C89502C35CF0')
        self.workflow.reset_from_task_id(task.id)
        self.workflow.do_engine_steps()
        self.assertEqual(len(self.workflow.subprocesses), 1)
        self.assertEqual(task.state, TaskState.STARTED)
        self.complete_task('Action2', True)
        self.complete_task('Action3', True)
        self.assertTrue(self.workflow.completed)

    def complete_task(self, name, save_restore=False):
        """Run the named step, advance the engine, and optionally round-trip
        the workflow through the serializer."""
        self.do_next_named_step(name)
        self.workflow.do_engine_steps()
        if save_restore:
            self.save_restore()
| 2,928 | Python | .py | 60 | 40.316667 | 100 | 0.678722 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
907 | ProcessParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/ProcessParserTest.py | import io
import os
import unittest
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser
def _process_parser(bpmn_filename, process_id):
    """Parse *bpmn_filename* from the test data directory and return the
    ProcessParser for *process_id*."""
    data_path = os.path.join(os.path.dirname(__file__), 'data', bpmn_filename)
    parser = BpmnParser()
    parser.add_bpmn_file(data_path)
    return parser.get_process_parser(process_id)
class ProcessParserTest(unittest.TestCase):
    """Parser-level tests: call-activity discovery and loading BPMN/DMN from
    strings and file-like objects."""

    def testReturnsEmptyListIfNoCallActivities(self):
        parser = _process_parser("no-tasks.bpmn", "no_tasks")
        assert parser.called_element_ids() == []

    def testHandlesSingleCallActivity(self):
        parser = _process_parser("single_call_activity.bpmn", "Process_p4pfxhq")
        assert parser.called_element_ids() == ["SingleTask_Process"]

    def testHandlesMultipleCallActivities(self):
        parser = _process_parser("multiple_call_activities.bpmn", "Process_90mmqlw")
        assert parser.called_element_ids() == ["Process_sypm122", "Process_diu8ta2", "Process_l14lar1"]

    def testHandlesNestedCallActivity(self):
        # Called elements are reported even when the callee is not provided.
        parser = _process_parser("nested_call_activity.bpmn", "Process_expand_call_activity")
        assert parser.called_element_ids() == ["is_this_missing", "set_permissions_process"]

    def testCanAddDmnFromString(self):
        parser = BpmnDmnParser()
        parser.add_dmn_str(EMPTY_DMN)
        assert len(parser.dmn_parsers) > 0

    def testCanAddDmnFromFileLikeObject(self):
        parser = BpmnDmnParser()
        parser.add_dmn_io(io.StringIO(EMPTY_DMN))
        assert len(parser.dmn_parsers) > 0

    def testCanAddBpmnFromString(self):
        parser = BpmnParser()
        parser.add_bpmn_str(EMPTY_WORKFLOW)
        assert parser.get_spec("no_tasks") is not None

    def testCanAddBpmnFromFileLikeObject(self):
        parser = BpmnParser()
        parser.add_bpmn_io(io.StringIO(EMPTY_WORKFLOW))
        assert parser.get_spec("no_tasks") is not None
# Minimal BPMN definition: process "no_tasks" wires a start event straight to
# an end event.  Used by the add_bpmn_str / add_bpmn_io tests above.
EMPTY_WORKFLOW = """
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL"
xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI"
xmlns:dc="http://www.omg.org/spec/DD/20100524/DC"
xmlns:di="http://www.omg.org/spec/DD/20100524/DI"
id="Definitions_96f6665"
targetNamespace="http://bpmn.io/schema/bpmn"
exporter="Camunda Modeler" exporterVersion="3.0.0-dev">
<bpmn:process id="no_tasks" name="No Tasks" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_184umot</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:endEvent id="Event_0qq9il3">
<bpmn:incoming>Flow_184umot</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_184umot" sourceRef="StartEvent_1" targetRef="Event_0qq9il3" />
</bpmn:process>
</bpmn:definitions>
"""

# Minimal DMN definition with a single near-empty decision table.  Used by the
# add_dmn_str / add_dmn_io tests above.
EMPTY_DMN = """
<definitions xmlns="https://www.omg.org/spec/DMN/20191111/MODEL/"
xmlns:dmndi="https://www.omg.org/spec/DMN/20191111/DMNDI/"
xmlns:dc="http://www.omg.org/spec/DMN/20180521/DC/"
id="Definitions_76910d7" name="DRD" namespace="http://camunda.org/schema/1.0/dmn">
<decision id="decision_1" name="Decision 1">
<decisionTable id="decisionTable_1">
<input id="input_1" label="First Name">
<inputExpression id="inputExpression_1" typeRef="string">
<text></text>
</inputExpression>
</input>
<output id="output_1" label="Last Name" typeRef="string" />
</decisionTable>
</decision>
</definitions>
"""
| 3,449 | Python | .py | 75 | 40.653333 | 103 | 0.709908 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
908 | ParallelManyThreadsAtSamePointTestNested.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelManyThreadsAtSamePointTestNested.py | import logging
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseParallelTestCase import BaseParallelTestCase
__author__ = 'matth'
class ParallelManyThreadsAtSamePointTestNested(BaseParallelTestCase):
    """Nested parallel splits: builds step-name instruction lists and feeds
    them to BaseParallelTestCase._do_test.

    NOTE(review): entries prefixed with '!' appear to assert the named step
    is NOT currently available — confirm against BaseParallelTestCase.
    """

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Many-Threads-At-Same-Point-Nested.bpmn20.xml',
            'Parallel Many Threads At Same Point Nested')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def test_depth_first(self):
        """Finish each branch completely before starting the next."""
        instructions = []
        for split1 in ['SP 1', 'SP 2']:
            for sp in ['A', 'B']:
                for split2 in ['1', '2']:
                    for t in ['A', 'B']:
                        instructions.append(split1 + sp + "|" + split2 + t)
                    instructions.append(split1 + sp + "|" + 'Inner Done')
                    instructions.append("!" + split1 + sp + "|" + 'Inner Done')
                if sp == 'A':
                    instructions.append("!Outer Done")
            instructions.append('Outer Done')
            instructions.append("!Outer Done")
        logging.info('Doing test with instructions: %s', instructions)
        self._do_test(instructions, only_one_instance=False, save_restore=True)

    def test_breadth_first(self):
        """Advance every branch one step at a time before joining."""
        instructions = []
        for t in ['A', 'B']:
            for split2 in ['1', '2']:
                for sp in ['A', 'B']:
                    for split1 in ['SP 1', 'SP 2']:
                        instructions.append(split1 + sp + "|" + split2 + t)
        for split1 in ['SP 1', 'SP 2']:
            for sp in ['A', 'B']:
                for split2 in ['1', '2']:
                    instructions += [split1 + sp + "|" + 'Inner Done']
        for split1 in ['SP 1', 'SP 2']:
            instructions += ['Outer Done']
        logging.info('Doing test with instructions: %s', instructions)
        self._do_test(instructions, only_one_instance=False, save_restore=True)
| 1,979 | Python | .py | 40 | 37.275 | 83 | 0.551116 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
909 | ParallelOnePathEndsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelOnePathEndsTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelOnePathEndsTest(BpmnWorkflowTestCase):
    """A parallel split where one path can terminate early via a choice; the
    'Done' join only becomes available once the remaining paths finish."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-One-Path-Ends.bpmn20.xml',
            'sid-33b2dda8-ca46-47ca-9f08-43de73abde9e')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        self.workflow.do_engine_steps()

    def testRunThroughParallelTaskFirst(self):
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Parallel Task')
        self.workflow.do_engine_steps()
        # 'Done' must not be reachable until the choice branch resolves.
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Choice 1', choice='No')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))

    def testRunThroughChoiceFirst(self):
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='No')
        self.workflow.do_engine_steps()
        # Likewise, 'Done' waits for the parallel branch.
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Parallel Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))

    def testRunThroughParallelTaskFirstYes(self):
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Parallel Task')
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        # Choosing 'Yes' adds an extra task before the join.
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
| 2,382 | Python | .py | 44 | 45.795455 | 98 | 0.702458 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
910 | ParallelLoopingAfterJoinTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelLoopingAfterJoinTest.py | from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseParallelTestCase import BaseParallelTestCase
__author__ = 'matth'
class ParallelLoopingAfterJoinTest(BaseParallelTestCase):
    """A parallel join followed by a 'Retry?' loop back to the start."""

    # One full pass through the parallel section, up to the retry decision.
    _PASS = ['Go', '1', '2', '2A', '2B', '2 Done']

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Looping-After-Join.bpmn20.xml',
            'sid-41eb2b6c-08bc-4a61-b38b-5f32052139c5')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def test1(self):
        # Single pass, decline the retry.
        self._do_test(self._PASS + [('Retry?', 'No'), 'Done'], save_restore=True)

    def test2(self):
        # Retry once, then decline and finish.
        steps = self._PASS + [('Retry?', 'Yes')] + self._PASS + [('Retry?', 'No'), 'Done']
        self._do_test(steps, save_restore=True)
| 802 | Python | .py | 16 | 41.625 | 98 | 0.596919 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
911 | ParallelMultipleSplitsAndJoinsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelMultipleSplitsAndJoinsTest.py | from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseParallelTestCase import BaseParallelTestCase
__author__ = 'matth'
class ParallelMultipleSplitsAndJoinsTest(BaseParallelTestCase):
    """Several interleaved parallel splits and joins, completed in different
    orders.

    NOTE(review): entries prefixed with '!' appear to assert the named step
    is NOT currently available — confirm against BaseParallelTestCase.
    """

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Multiple-Splits-And-Joins.bpmn20.xml',
            'sid-a90fa1f1-32a9-4a62-8ad4-8820a3fc6cc4')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def test1(self):
        self._do_test(['1', '!Done', '2', '1A', '!Done', '2A', '1B', '2B',
                       '!Done', '1 Done', '!Done', '2 Done', 'Done'], save_restore=True)

    def test2(self):
        self._do_test(
            ['1', '!Done', '1A', '1B', '1 Done', '!Done', '2', '2A', '2B', '2 Done', 'Done'], save_restore=True)

    def test3(self):
        self._do_test(['1', '2', '!Done', '1B', '2B', '!2 Done', '1A',
                       '!Done', '2A', '1 Done', '!Done', '2 Done', 'Done'], save_restore=True)

    def test4(self):
        self._do_test(
            ['1', '1B', '1A', '1 Done', '!Done', '2', '2B', '2A', '2 Done', 'Done'], save_restore=True)
| 1,132 | Python | .py | 21 | 44.857143 | 112 | 0.565336 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
912 | ParallelJoinLongTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelJoinLongTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelJoinLongTest(BpmnWorkflowTestCase):
    """Two long parallel threads (12 tasks each) meeting at a join, completed
    alternately or one whole thread at a time."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Join-Long.bpmn20.xml',
            'sid-9274d78f-68da-4da6-a369-8a64feb3de52')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        self.workflow.do_engine_steps()

    def testRunThroughAlternating(self):
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Thread 1 - Choose', choice='Yes', with_save_load=True)
        self.workflow.do_engine_steps()
        self.do_next_named_step('Thread 2 - Choose', choice='Yes', with_save_load=True)
        self.workflow.do_engine_steps()
        # Advance both threads in lock-step.
        for i in range(1, 13):
            self.do_next_named_step('Thread 1 - Task %d' % i, with_save_load=True)
            self.workflow.do_engine_steps()
            self.do_next_named_step('Thread 2 - Task %d' % i, with_save_load=True)
            self.workflow.do_engine_steps()
        self.do_next_named_step('Done', with_save_load=True)
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))

    def testRunThroughThread1First(self):
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Thread 1 - Choose', choice='Yes', with_save_load=True)
        self.workflow.do_engine_steps()
        # Complete thread 1 entirely before touching thread 2.
        for i in range(1, 13):
            self.do_next_named_step('Thread 1 - Task %d' % i)
            self.workflow.do_engine_steps()
        # The join must not fire while thread 2 is still outstanding.
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_named_step('Thread 2 - Choose', choice='Yes', with_save_load=True)
        self.workflow.do_engine_steps()
        for i in range(1, 13):
            self.do_next_named_step('Thread 2 - Task %d' % i, with_save_load=True)
            self.workflow.do_engine_steps()
        self.do_next_named_step('Done', with_save_load=True)
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
| 2,407 | Python | .py | 42 | 48.190476 | 98 | 0.672487 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
913 | ParallelOrderTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelOrderTest.py | from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class ParallelOrderTest(BpmnWorkflowTestCase):
    """Verify that parallel tasks surface in diagram order.

    The example bpmn diagram has 4 parallel workflows; the parallel tasks
    should have a natural order that follows the visual layout of the
    diagram, rather than just the order in which they were created.
    """
    def setUp(self):
        top_spec, subprocess_specs = self.load_workflow_spec('ParallelOrder.bpmn', 'ParallelOrder')
        self.workflow = BpmnWorkflow(top_spec, subprocess_specs)
    def testRunThroughHappy(self):
        self.workflow.do_engine_steps()
        self.assertFalse(self.workflow.completed)
        self.assertEqual(4, len(self.get_ready_user_tasks()))
        ready = self.get_ready_user_tasks()
        # Names should come back as "Task 1" .. "Task 4" in layout order.
        for position, task in enumerate(ready, start=1):
            self.assertEqual("Task %d" % position, task.task_spec.bpmn_name)
| 1,051 | Python | .py | 19 | 48.473684 | 90 | 0.725854 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
914 | ParallelJoinLongInclusiveTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelJoinLongInclusiveTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelJoinLongInclusiveTest(BpmnWorkflowTestCase):
    """Long parallel join where thread 2 may decline ('No') and take a side path."""
    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Join-Long-Inclusive.bpmn20.xml',
            'sid-bae04828-c969-4480-8cfe-09ad1f97d81c')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        self.workflow.do_engine_steps()
    def testRunThroughThread1FirstThenNo(self):
        """Finish thread 1, then answer 'No' on thread 2; 'Done' becomes reachable."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Thread 1 - Choose', choice='Yes', with_save_load=True)
        self.workflow.do_engine_steps()
        for i in range(1, 13):
            self.do_next_named_step('Thread 1 - Task %d' % i)
            self.workflow.do_engine_steps()
        # Join not reachable until thread 2 has chosen.
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_named_step('Thread 2 - Choose', choice='No', with_save_load=True)
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done', with_save_load=True)
        self.workflow.do_engine_steps()
        self.do_next_named_step('Thread 2 - No Task', with_save_load=True)
        self.workflow.do_engine_steps()
        self.assertEqual(
            0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testNoFirstThenThread1(self):
        """Answer 'No' on thread 2 before running thread 1 at all."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Thread 2 - Choose', choice='No', with_save_load=True)
        self.workflow.do_engine_steps()
        self.do_next_named_step('Thread 1 - Choose', choice='Yes', with_save_load=True)
        self.workflow.do_engine_steps()
        for i in range(1, 13):
            self.do_next_named_step('Thread 1 - Task %d' % i)
            self.workflow.do_engine_steps()
        self.do_next_named_step('Done', with_save_load=True)
        self.workflow.do_engine_steps()
        self.do_next_named_step('Thread 2 - No Task', with_save_load=True)
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
| 2,364 | Python | .py | 42 | 47.452381 | 98 | 0.68026 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
915 | ParallelGatewayTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelGatewayTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelFromCamunda(BpmnWorkflowTestCase):
    """Parallel gateway diagram authored in Camunda: fork into three tasks, then join."""
    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('Test-Workflows/Parallel.camunda.bpmn20.xml', 'Process_1hb021r')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        self.workflow.do_engine_steps()
    def testRunThroughParallelTaskFirst(self):
        """Walk the happy path, save/restoring after every step; 'Done' never reachable."""
        # 1 first task
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('First Task')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        # 3 parallel tasks
        self.assertEqual(3, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Parallel Task A')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Parallel Task B')
        self.save_restore()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Parallel Task C')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        # 1 last task
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Last Task')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
    def testAllParallelDataMakesItIntoGatewayTask(self):
        """It should be true that data collected across parallel tasks
        is all available in the join task."""
        self.do_next_named_step('First Task')
        self.do_next_named_step('Parallel Task A',
                                set_attribs={"taskA": "taskA"})
        self.do_next_named_step('Parallel Task B',
                                set_attribs={"taskB": "taskB"})
        self.do_next_named_step('Parallel Task C',
                                set_attribs={"taskC": "taskC"})
        self.workflow.do_engine_steps()
        self.do_next_named_step('Last Task')
        # All three branch values must have merged into the post-join task data.
        self.assertEqual("taskA", self.workflow.last_task.data["taskA"])
        self.assertEqual("taskB", self.workflow.last_task.data["taskB"])
        self.assertEqual("taskC", self.workflow.last_task.data["taskC"])
| 2,556 | Python | .py | 50 | 41.5 | 117 | 0.659719 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
916 | ParallelGatewayLoopInputTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelGatewayLoopInputTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class ParallelGatewayLoopInputTest(BpmnWorkflowTestCase):
    """Run 'gateway_loop_input.bpmn': a flow that loops back into a parallel
    gateway; the final workflow data should be ``{'x': 2}``."""
    def setUp(self):
        top_level, subprocess_specs = self.load_workflow_spec('gateway_loop_input.bpmn', 'main')
        self.workflow = BpmnWorkflow(top_level, subprocess_specs)
    def test_loop_input(self):
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(1, len(ready_tasks))
        # Complete the single ready task and let the engine finish.
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertDictEqual({'x': 2}, self.workflow.data)
| 723 | Python | .py | 15 | 41.266667 | 91 | 0.731044 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
917 | ParallelThroughSameTaskTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelThroughSameTaskTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelThroughSameTaskTest(BpmnWorkflowTestCase):
    """Two parallel routes that both pass through the same 'Repeated Task'.

    Exercises the inclusive gateway's decision about when 'Done' may be
    taken, depending on whether the remaining route shares a sequence flow.
    """
    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Through-Same-Task.bpmn20.xml',
            'sid-57c563e3-fb68-4961-ae34-b6201e0c09e8')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        self.workflow.do_engine_steps()
    def testRunThroughFirstRepeatTaskFirst(self):
        """Take 'Repeated Task' before making Choice 1."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        # The inclusive gateway allows this to pass through (since there is a
        # route to it on the same sequence flow)
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testRepeatTasksReadyTogether(self):
        """Both instances of 'Repeated Task' become ready at the same time."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(2, len(ready_tasks))
        self.assertEqual(
            'Repeated Task', ready_tasks[0].task_spec.bpmn_name)
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        # The inclusive gateway allows us through here, because there is no route for the other thread
        # that doesn't use the same sequence flow
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testRepeatTasksReadyTogetherSaveRestore(self):
        """Same as testRepeatTasksReadyTogether, with save/restore between steps."""
        self.save_restore()
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(2, len(ready_tasks))
        self.assertEqual(
            'Repeated Task', ready_tasks[0].task_spec.bpmn_name)
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        self.save_restore()
        # The inclusive gateway allows us through here, because there is no route for the other thread
        # that doesn't use the same sequence flow
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testNoRouteRepeatTaskFirst(self):
        """Take 'Repeated Task' first, then choose 'No' on Choice 1."""
        self.save_restore()
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        # The inclusive gateway allows this to pass through (since there is a
        # route to it on the same sequence flow)
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Choice 1', choice='No')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('No Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testNoRouteNoTaskFirst(self):
        """Choose 'No' and complete 'No Task' before the repeated task."""
        self.save_restore()
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='No')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('No Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testNoRouteNoFirstThenRepeating(self):
        """Choose 'No', run the repeated task, then 'No Task', then 'Done'."""
        self.save_restore()
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='No')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Repeated Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('No Task')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
| 6,610 | Python | .py | 136 | 39.757353 | 102 | 0.669564 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
918 | ParallelManyThreadsAtSamePointTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelManyThreadsAtSamePointTest.py | from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseParallelTestCase import BaseParallelTestCase
__author__ = 'matth'
class ParallelManyThreadsAtSamePointTest(BaseParallelTestCase):
    """Four threads all converging on the same 'Done' step, in various orders.

    ``only_one_instance=False`` because multiple 'Done' instances may be
    ready simultaneously; see BaseParallelTestCase._do_test.
    """
    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Many-Threads-At-Same-Point.bpmn20.xml',
            'sid-6d1186e0-fc1f-43d5-bdb4-c49df043944d')
        self.workflow = BpmnWorkflow(spec, subprocesses)
    def test1(self):
        # All four threads first, then all four 'Done' steps.
        self._do_test(['1', '2', '3', '4', 'Done', 'Done', 'Done', 'Done'],
                      only_one_instance=False, save_restore=True)
    def test2(self):
        # Each thread immediately followed by its 'Done'.
        self._do_test(['1', 'Done', '2', 'Done', '3', 'Done', '4', 'Done'],
                      only_one_instance=False, save_restore=True)
    def test3(self):
        # Mixed ordering of thread tasks and 'Done' steps.
        self._do_test(['1', '2', 'Done', '3', '4', 'Done', 'Done', 'Done'],
                      only_one_instance=False, save_restore=True)
| 947 | Python | .py | 18 | 43.055556 | 76 | 0.613464 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
919 | ParallelMultipleSplitsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelMultipleSplitsTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelMultipleSplitsTest(BpmnWorkflowTestCase):
    """A parallel fork into three subprocesses, each with its own exclusive split."""
    def setUp(self):
        main_spec, subprocess_specs = self.load_workflow_spec(
            'Test-Workflows/Parallel-Multiple-Splits.bpmn20.xml',
            'sid-0f63def9-833d-4bcd-a6c4-8ef84a098b1a')
        self.workflow = BpmnWorkflow(main_spec, subprocess_specs)
        self.workflow.do_engine_steps()
    def testRunThroughAlternating(self):
        """Make all three choices first, then complete each 'Yes' task, then join."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        steps = [
            ('Do First', None),
            ('SP 1 - Choose', 'Yes'),
            ('SP 2 - Choose', 'Yes'),
            ('SP 3 - Choose', 'Yes'),
            ('SP 1 - Yes Task', None),
            ('SP 2 - Yes Task', None),
            ('SP 3 - Yes Task', None),
            ('Done', None),
        ]
        for step_name, selection in steps:
            self.do_next_named_step(step_name, choice=selection)
            self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
| 1,483 | Python | .py | 30 | 41.466667 | 98 | 0.682825 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
920 | BaseParallelTestCase.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/BaseParallelTestCase.py | import logging
from SpiffWorkflow import TaskState
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class BaseParallelTestCase(BpmnWorkflowTestCase):
    """Shared driver for the parallel-gateway test suites.

    Subclasses create ``self.workflow`` in ``setUp`` and call ``_do_test``
    with an ordered list of step names.
    """
    def _do_test(self, order, only_one_instance=True, save_restore=False):
        """Execute the named steps in ``order`` and assert the workflow finishes.

        Each entry is either a step name or a ``(name, choice)`` tuple.
        A name prefixed with ``'!'`` asserts that the step can NOT currently
        be taken.  ``save_restore`` serializes/deserializes after each step.
        """
        self.workflow.do_engine_steps()
        for s in order:
            choice = None
            if isinstance(s, tuple):
                s, choice = s
            if s.startswith('!'):
                logging.info("Checking that we cannot do '%s'", s[1:])
                self.assertRaises(
                    AssertionError, self.do_next_named_step, s[1:], choice=choice)
            else:
                if choice is not None:
                    logging.info(
                        "Doing step '%s' (with choice='%s')", s, choice)
                else:
                    logging.info("Doing step '%s'", s)
                self.do_next_named_step(s, choice=choice, only_one_instance=only_one_instance)
            self.workflow.do_engine_steps()
            if save_restore:
                self.save_restore()
        self.workflow.do_engine_steps()
        # Everything must be finished: no READY or WAITING tasks remain.
        unfinished = self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)
        if unfinished:
            logging.debug("Unfinished tasks: %s", unfinished)
            logging.debug(self.workflow.get_dump())
        self.assertEqual(0, len(unfinished))
| 1,393 | Python | .py | 31 | 32.612903 | 94 | 0.582288 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
921 | ParallelThenExclusiveTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/parallel_gateway_tests/ParallelThenExclusiveTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ParallelThenExclusiveTest(BpmnWorkflowTestCase):
    """A parallel split where one branch contains an exclusive choice."""
    def setUp(self):
        spec, subprocesses = self.load_workflow_spec(
            'Test-Workflows/Parallel-Then-Exclusive.bpmn20.xml',
            'sid-bb9ea2d5-58b6-43c7-8e77-6e28f71106f0')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        self.workflow.do_engine_steps()
    def testRunThroughParallelTaskFirst(self):
        """Complete the parallel task before making the exclusive choice."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Parallel Task')
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testRunThroughChoiceFirst(self):
        """Make the choice first, then interleave the parallel task."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Parallel Task')
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
    def testRunThroughChoiceThreadCompleteFirst(self):
        """Finish the entire choice branch before the parallel task."""
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Choice 1', choice='Yes')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Yes Task')
        self.workflow.do_engine_steps()
        self.assertRaises(AssertionError, self.do_next_named_step, 'Done')
        self.do_next_named_step('Parallel Task')
        self.workflow.do_engine_steps()
        self.do_next_named_step('Done')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
class ParallelThenExclusiveNoInclusiveTest(ParallelThenExclusiveTest):
    """Re-run the inherited scenarios against the no-inclusive-gateway variant
    of the diagram; only the spec loaded in setUp differs."""
    def setUp(self):
        main_spec, subprocess_specs = self.load_workflow_spec(
            'Test-Workflows/Parallel-Then-Exclusive-No-Inclusive.bpmn20.xml',
            'sid-900d26c9-beab-47a4-8092-4284bfb39927')
        self.workflow = BpmnWorkflow(main_spec, subprocess_specs)
        self.workflow.do_engine_steps()
| 2,868 | Python | .py | 54 | 44.611111 | 98 | 0.701434 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
922 | MessageInterruptsSpTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MessageInterruptsSpTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class MessageInterruptsSpTest(BpmnWorkflowTestCase):
    """A subprocess with an interrupting message boundary event."""
    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/*.bpmn20.xml',
            'sid-607dfa9b-dbfd-41e8-94f8-42ae37f3b824',
            False)
    def testRunThroughHappySaveAndRestore(self):
        """No message arrives: complete the subprocess task and acknowledge."""
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something In a Subprocess')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_exclusive_step('Ack Subprocess Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
    def testRunThroughInterruptSaveAndRestore(self):
        """Deliver 'Test Message' while the subprocess runs; it gets interrupted."""
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        # Fire the boundary event's message while the subprocess is active.
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_exclusive_step('Acknowledge SP Interrupt Message')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
| 2,178 | Python | .py | 43 | 42.27907 | 82 | 0.709357 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
923 | TimerCycleStartTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerCycleStartTest.py | import datetime
import time
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
counter = 0

def my_custom_function():
    """Increment the module-level invocation counter and return its new value."""
    global counter
    counter += 1
    return counter
class CustomScriptEngine(PythonScriptEngine):
    """This is a custom script processor that can be easily injected into Spiff Workflow.

    It executes python code read in from the bpmn, exposing
    ``custom_function`` and ``timedelta`` to those scripts.
    """
    def __init__(self):
        extra_globals = {
            'custom_function': my_custom_function,
            'timedelta': datetime.timedelta,
        }
        super().__init__(environment=TaskDataEnvironment(extra_globals))
class TimerCycleStartTest(BpmnWorkflowTestCase):
    """A cycle timer start event that should fire exactly twice before termination."""
    def setUp(self):
        spec, subprocesses = self.load_collaboration('timer-cycle-start.bpmn', 'Collaboration_0bcl3k5')
        self.workflow = BpmnWorkflow(spec, subprocesses, script_engine=CustomScriptEngine())
    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)
    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)
    def actual_test(self,save_restore = False):
        """Drive the workflow until the terminate event; the script counter must be 2."""
        global counter
        counter = 0
        # We have a loop so we can continue to execute waiting tasks when
        # timers expire. The test workflow has a wait timer that pauses long enough to
        # allow the cycle to complete three times before being cancelled by the terminate
        # event (the timer should only run twice, we want to make sure it doesn't keep
        # executing)
        for loopcount in range(6):
            self.workflow.do_engine_steps()
            if save_restore:
                self.save_restore()
            time.sleep(0.1)
            self.workflow.refresh_waiting_tasks()
        self.assertEqual(counter, 2)
        self.assertTrue(self.workflow.completed)
| 2,044 | Python | .py | 45 | 38.111111 | 103 | 0.707157 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
924 | NITimerDurationBoundaryTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/NITimerDurationBoundaryTest.py | import datetime
import time
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class NITimerDurationTest(BpmnWorkflowTestCase):
    """
    Non-Interrupting Timer boundary test
    """
    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('timer-non-interrupt-boundary.bpmn', 'NonInterruptTimer')
        self.workflow = BpmnWorkflow(spec, subprocesses)
    def load_spec(self):
        # Intentionally a no-op; kept so the class interface is unchanged.
        return
    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)
    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)
    def actual_test(self, save_restore=False):
        """Let the non-interrupting boundary timer fire, then finish both tasks.

        While the timer is pending there is exactly one ready user task;
        after it fires a second (non-interrupting) task appears alongside it.
        """
        self.workflow.do_engine_steps()
        event = self.workflow.get_next_task(spec_name='Event_0jyy8ao')
        self.assertEqual(event.state, TaskState.WAITING)
        loopcount = 0
        starttime = datetime.datetime.now()
        # test bpmn has a timeout of .2s; we should terminate loop before that.
        # The subprocess will also wait
        while event.state == TaskState.WAITING and loopcount < 10:
            if save_restore:
                self.save_restore()
                # Re-fetch: save/restore produces new task objects.
                event = self.workflow.get_next_task(spec_name='Event_0jyy8ao')
            time.sleep(0.1)
            # There should be one ready task until the boundary event fires
            self.assertEqual(len(self.get_ready_user_tasks()), 1)
            self.workflow.refresh_waiting_tasks()
            self.workflow.do_engine_steps()
            loopcount += 1
        duration = datetime.datetime.now() - starttime
        # appropriate time here is .5 seconds due to the .3 seconds that we loop and then
        self.assertLess(duration, datetime.timedelta(seconds=.5))
        self.assertGreater(duration, datetime.timedelta(seconds=.2))
        ready_tasks = self.get_ready_user_tasks()
        # Now there should be two: the original work task plus the task
        # created by the non-interrupting boundary event.
        self.assertEqual(len(ready_tasks), 2)
        for task in ready_tasks:
            if task.task_spec.name == 'GetReason':
                task.data['delay_reason'] = 'Just Because'
            elif task.task_spec.name == 'Activity_Work':
                task.data['work_done'] = 'Yes'
            task.run()
            self.workflow.refresh_waiting_tasks()
            self.workflow.do_engine_steps()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertEqual(self.workflow.last_task.data, {'work_done': 'Yes', 'delay_reason': 'Just Because'})
| 2,756 | Python | .py | 58 | 38.448276 | 110 | 0.66182 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
925 | MultipleThrowEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MultipleThrowEventTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class MultipleThrowEventIntermediateCatchTest(BpmnWorkflowTestCase):
    """A multiple intermediate throw event matched by intermediate catches
    in the 'top' collaboration; engine steps alone should finish it."""
    def setUp(self):
        self.spec, subprocess_specs = self.load_collaboration('multiple-throw.bpmn', 'top')
        self.workflow = BpmnWorkflow(self.spec, subprocess_specs)
    def testMultipleThrowEventIntermediateCatch(self):
        self.actual_test()
    def testMultipleThrowEventIntermediateCatchSaveRestore(self):
        self.actual_test(True)
    def actual_test(self, save_restore=False):
        if save_restore:
            self.save_restore()
        self.workflow.do_engine_steps()
        waiting = self.workflow.get_tasks(state=TaskState.WAITING)
        self.assertEqual(0, len(waiting))
        self.assertEqual(self.workflow.completed, True)
class MultipleThrowEventStartsEventTest(BpmnWorkflowTestCase):
    """Start the 'initiate' process; its multiple throw event reaches the
    other process specs loaded from the same file."""
    def setUp(self):
        subprocess_specs = self.get_all_specs('multiple-throw-start.bpmn')
        self.spec = subprocess_specs.pop('initiate')
        self.workflow = BpmnWorkflow(self.spec, subprocess_specs)
    def testMultipleThrowEventStartEvent(self):
        self.actual_test()
    def testMultipleThrowEventStartEventSaveRestore(self):
        self.actual_test(True)
    def actual_test(self, save_restore=False):
        """Run to the single ready user task, complete it, expect completion."""
        if save_restore:
            self.save_restore()
        self.workflow.do_engine_steps()
        user_tasks = self.get_ready_user_tasks()
        self.assertEqual(1, len(user_tasks))
        user_tasks[0].run()
        self.workflow.do_engine_steps()
        self.assertEqual(self.workflow.completed, True)
| 1,625 | Python | .py | 35 | 38.885714 | 86 | 0.72416 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
926 | TimerCycleTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerCycleTest.py | import datetime
import time
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
counter = 0

def my_custom_function():
    """Increment the module-level invocation counter and return its new value."""
    global counter
    counter += 1
    return counter
class CustomScriptEngine(PythonScriptEngine):
    """This is a custom script processor that can be easily injected into Spiff Workflow.

    It executes python code read in from the bpmn, exposing
    ``custom_function`` and ``timedelta`` to those scripts.
    """
    def __init__(self):
        extra_globals = {
            'custom_function': my_custom_function,
            'timedelta': datetime.timedelta,
        }
        super().__init__(environment=TaskDataEnvironment(extra_globals))
class TimerCycleTest(BpmnWorkflowTestCase):
    """A cycle timer boundary that fires twice (its child limit) and then stops."""
    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('timer-cycle.bpmn', 'timer')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=CustomScriptEngine())
    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)
    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)
    def actual_test(self,save_restore = False):
        """Loop the engine while the cycle timer runs; the script counter must end at 2."""
        global counter
        counter = 0
        # See comments in timer cycle test start for more context
        for loopcount in range(4):
            self.workflow.do_engine_steps()
            if save_restore:
                self.save_restore()
            self.workflow.refresh_waiting_tasks()
            events = self.workflow.waiting_events()
            refill = self.workflow.get_tasks(spec_name='Refill_Coffee')
            # Wait time is 0.1s, with a limit of 2 children, so by the 3rd iteration, the event should be complete
            if loopcount < 2:
                self.assertEqual(len(events), 1)
            else:
                self.assertEqual(len(events), 0)
            # The first child should be created after one cycle has passed
            if loopcount == 0:
                self.assertEqual(len(refill), 0)
            time.sleep(0.1)
        # Get coffee still ready
        coffee = self.workflow.get_next_task(spec_name='Get_Coffee')
        self.assertEqual(coffee.state, TaskState.READY)
        # Timer completed
        timer = self.workflow.get_next_task(spec_name='CatchMessage')
        self.assertEqual(timer.state, TaskState.COMPLETED)
        self.assertEqual(counter, 2)
| 2,581 | Python | .py | 57 | 36.877193 | 114 | 0.679012 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
927 | MessageNonInterruptTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class MessageNonInterruptTest(BpmnWorkflowTestCase):
    """Exercises a non-interrupting message boundary event.

    Catching 'Test Message' adds an extra 'Acknowledge Non-Interrupt
    Message' task without cancelling the main 'Do Something That Takes A
    Long Time' path.  Each scenario is run with and without serialization
    round-trips (save_restore) at every step.
    """

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/*.bpmn20.xml',
            'sid-b0903a88-fe74-4f93-b912-47b815ea8d1c',
            False)

    def testRunThroughHappySaveAndRestore(self):
        # No message delivered: the workflow completes on the main path alone.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step('Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something That Takes A Long Time')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageInterruptSaveAndRestore(self):
        # Message arrives mid-flight; the acknowledge task runs alongside
        # the main path instead of interrupting it.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step('Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        # NOTE(review): duplicate of the assertion above; the non-save/restore
        # variant (testRunThroughMessageInterrupt) checks STARTED/WAITING at
        # this point -- confirm whether this repetition was intended.
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughHappy(self):
        # Same as the save/restore variant, without serialization.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageInterrupt(self):
        # Acknowledge the message first, then finish the long task.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageInterruptOtherOrder(self):
        # Finish the long task first, then acknowledge the message.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageInterruptOtherOrderSaveAndRestore(self):
        # Same ordering as above, with save/restore between each step.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step(
            'Select Test', choice='Message Non Interrupt')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Do Something That Takes A Long Time')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step('Acknowledge Non-Interrupt Message')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
| 7,681 | Python | .py | 127 | 51.519685 | 82 | 0.711067 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
928 | TimerDateTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerDateTest.py | import datetime
import time
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class TimerDateTest(BpmnWorkflowTestCase):
    """A start event with a fixed-date timer fires once its date has passed."""

    def setUp(self):
        # The script engine needs datetime/timedelta in scope so the BPMN
        # timer expression can be evaluated.
        self.script_engine = PythonScriptEngine(environment=TaskDataEnvironment({
            "datetime": datetime.datetime,
            "timedelta": datetime.timedelta,
        }))
        self.spec, self.subprocesses = self.load_workflow_spec('timer-date-start.bpmn', 'date_timer')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=self.script_engine)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self,save_restore = False):
        """Poll until the timer fires, then check the elapsed wait is plausible."""
        self.workflow.do_engine_steps()
        self.assertEqual(len(self.workflow.waiting_events()), 1)
        loopcount = 0
        starttime = datetime.datetime.now()
        # test bpmn has a timeout of .05s; we should terminate loop before that.
        while len(self.workflow.get_tasks(state=TaskState.WAITING)) > 0 and loopcount < 8:
            if save_restore:
                self.save_restore()
                # Restore discards the custom engine, so re-attach it.
                self.workflow.script_engine = self.script_engine
            time.sleep(0.01)
            self.workflow.refresh_waiting_tasks()
            loopcount += 1
        endtime = datetime.datetime.now()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertTrue((endtime-starttime) > datetime.timedelta(seconds=.02))
| 1,728 | Python | .py | 36 | 39.861111 | 101 | 0.700535 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
929 | EventBasedGatewayTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/EventBasedGatewayTest.py | from datetime import timedelta
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class EventBasedGatewayTest(BpmnWorkflowTestCase):
    """Event-based gateway: only the first event caught wins; the rest cancel."""

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('event-gateway.bpmn', 'Process_0pvx19v')
        self.script_engine = PythonScriptEngine(environment=TaskDataEnvironment({"timedelta": timedelta}))
        self.workflow = BpmnWorkflow(self.spec, script_engine=self.script_engine)

    def testEventBasedGateway(self):
        self.actual_test()

    def testEventBasedGatewaySaveRestore(self):
        self.actual_test(True)

    def actual_test(self, save_restore=False):
        """Catching message_1 completes that branch and cancels the others."""
        self.workflow.do_engine_steps()
        waiting_tasks = self.workflow.get_tasks(state=TaskState.WAITING)
        if save_restore:
            self.save_restore()
            # Restore discards the custom engine, so re-attach it.
            self.workflow.script_engine = self.script_engine
        self.assertEqual(len(waiting_tasks), 2)
        self.workflow.catch(BpmnEvent(MessageEventDefinition('message_1'), {}))
        self.workflow.do_engine_steps()
        self.workflow.refresh_waiting_tasks()
        self.assertEqual(self.workflow.completed, True)
        self.assertEqual(self.workflow.get_next_task(spec_name='message_1_event').state, TaskState.COMPLETED)
        self.assertEqual(self.workflow.get_next_task(spec_name='message_2_event').state, TaskState.CANCELLED)
        self.assertEqual(self.workflow.get_next_task(spec_name='timer_event').state, TaskState.CANCELLED)

    def testTimeout(self):
        # Fire the gateway's timer branch directly rather than waiting for it.
        self.workflow.do_engine_steps()
        waiting_tasks = self.workflow.get_tasks(state=TaskState.WAITING)
        self.assertEqual(len(waiting_tasks), 2)
        timer_event_definition = waiting_tasks[0].task_spec.event_definition.event_definitions[-1]
        self.workflow.catch(BpmnEvent(timer_event_definition))
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertEqual(self.workflow.completed, True)
        self.assertEqual(self.workflow.get_next_task(spec_name='message_1_event').state, TaskState.CANCELLED)
        self.assertEqual(self.workflow.get_next_task(spec_name='message_2_event').state, TaskState.CANCELLED)
        self.assertEqual(self.workflow.get_next_task(spec_name='timer_event').state, TaskState.COMPLETED)

    def testMultipleStart(self):
        # NOTE(review): this test has no assertions -- it only verifies that
        # delivering both messages does not raise.  Consider asserting on the
        # workflow state after the final do_engine_steps().
        spec, subprocess = self.load_workflow_spec('multiple-start-parallel.bpmn', 'main')
        workflow = BpmnWorkflow(spec)
        workflow.do_engine_steps()
        workflow.catch(BpmnEvent(MessageEventDefinition('message_1'), {}))
        workflow.catch(BpmnEvent(MessageEventDefinition('message_2'), {}))
        workflow.refresh_waiting_tasks()
        workflow.do_engine_steps()
| 2,975 | Python | .py | 49 | 52.959184 | 109 | 0.737564 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
930 | TimerIntermediateTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerIntermediateTest.py | import datetime
import time
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class TimerIntermediateTest(BpmnWorkflowTestCase):
    """An intermediate timer catch event stays WAITING until a refresh past its due time."""

    def setUp(self):
        # Fixed a misspelled attribute (was `self.subprocesss`, three s's);
        # it was set and read only within this method, so the rename is
        # purely internal and matches the sibling test classes.
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/Timer-Intermediate.bpmn20.xml',
            'sid-909dfba4-15dd-47b3-b7d4-88330891429a')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)

    def testRunThroughHappy(self):
        due_time = (datetime.datetime.now() + datetime.timedelta(seconds=0.01)).isoformat()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.workflow.get_tasks(state=TaskState.READY)[0].set_data(due_time=due_time)
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        time.sleep(0.02)
        # The timer stays WAITING even after the due time has passed;
        # it only fires when refresh_waiting_tasks() re-evaluates it.
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.refresh_waiting_tasks()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.READY|TaskState.WAITING)))
| 1,391 | Python | .py | 25 | 48.28 | 98 | 0.732249 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
931 | TransactionSubprocssTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TransactionSubprocssTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'michaelc'
class TransactionSubprocessTest(BpmnWorkflowTestCase):
    """Transaction subprocess with cancel and error boundary events.

    The transaction has three boundary events: a cancel event, an error
    event with code 1, and an error event with no code.  Each test drives
    the workflow down a different exit path and checks which boundary
    events completed and which were cancelled.
    """

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('transaction.bpmn', 'Main_Process')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.workflow.do_engine_steps()

    def testNormalCompletion(self):
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        ready_tasks[0].set_data(**{'value': 'asdf'})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        ready_tasks[0].set_data(**{'quantity': 2})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        self.assertIn('value', self.workflow.last_task.data)
        # Check that workflow and next task completed
        subprocess = self.workflow.get_next_task(spec_name='Subprocess')
        self.assertEqual(subprocess.state, TaskState.COMPLETED)
        print_task = self.workflow.get_next_task(spec_name="Activity_Print_Data")
        self.assertEqual(print_task.state, TaskState.COMPLETED)
        # Check that the boundary events were cancelled
        cancel_task = self.workflow.get_next_task(spec_name="Catch_Cancel_Event")
        self.assertEqual(cancel_task.state, TaskState.CANCELLED)
        error_1_task = self.workflow.get_next_task(spec_name="Catch_Error_1")
        self.assertEqual(error_1_task.state, TaskState.CANCELLED)
        error_none_task = self.workflow.get_next_task(spec_name="Catch_Error_None")
        self.assertEqual(error_none_task.state, TaskState.CANCELLED)

    def testSubworkflowCancelEvent(self):
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        # If value == '', we cancel
        ready_tasks[0].set_data(**{'value': ''})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        # If the subprocess gets cancelled, verify that data set there does not persist
        self.assertNotIn('value', self.workflow.last_task.data)
        # Check that we completed the Cancel Task
        cancel_task = self.workflow.get_next_task(spec_name="Cancel_Action")
        self.assertEqual(cancel_task.state, TaskState.COMPLETED)
        # And cancelled the remaining tasks
        error_1_task = self.workflow.get_next_task(spec_name="Catch_Error_1")
        self.assertEqual(error_1_task.state, TaskState.CANCELLED)
        error_none_task = self.workflow.get_next_task(spec_name="Catch_Error_None")
        self.assertEqual(error_none_task.state, TaskState.CANCELLED)
        # We should not have this task, as we followed the 'cancel branch'
        print_task = self.workflow.get_tasks(spec_name="Activity_Print_Data")
        self.assertEqual(len(print_task), 1)
        self.assertEqual(print_task[0].state, TaskState.CANCELLED)

    def testSubworkflowErrorCodeNone(self):
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        ready_tasks[0].set_data(**{'value': 'asdf'})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        # If quantity == 0, we throw an error with no error code
        ready_tasks[0].set_data(**{'quantity': 0})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        # We formerly checked that subprocess data does not persist, but I think it should persist
        # A boundary event is just an alternate path out of a workflow, and we might need the context
        # of the event in later steps
        # The cancel boundary event should be cancelled
        cancel_task = self.workflow.get_next_task(spec_name="Catch_Cancel_Event")
        self.assertEqual(cancel_task.state, TaskState.CANCELLED)
        # We should catch the None Error, but not Error 1
        error_none_task = self.workflow.get_next_task(spec_name="Catch_Error_None")
        self.assertEqual(error_none_task.state, TaskState.COMPLETED)
        error_1_task = self.workflow.get_next_task(spec_name="Catch_Error_1")
        self.assertEqual(error_1_task.state, TaskState.CANCELLED)
        # Make sure this branch didn't get followed
        print_task = self.workflow.get_tasks(spec_name="Activity_Print_Data")
        self.assertEqual(len(print_task), 1)
        self.assertEqual(print_task[0].state, TaskState.CANCELLED)

    def testSubworkflowErrorCodeOne(self):
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        ready_tasks[0].set_data(**{'value': 'asdf'})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        # If quantity < 0, we throw 'Error 1'
        ready_tasks[0].set_data(**{'quantity': -1})
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        # The cancel boundary event should be cancelled
        # I've removed this check, see previous test for rationale
        # Both boundary events should complete
        error_none_task = self.workflow.get_next_task(spec_name="Catch_Error_None")
        self.assertEqual(error_none_task.state, TaskState.COMPLETED)
        error_1_task = self.workflow.get_next_task(spec_name="Catch_Error_1")
        self.assertEqual(error_1_task.state, TaskState.COMPLETED)
        print_task = self.workflow.get_tasks(spec_name="Activity_Print_Data")
        self.assertEqual(len(print_task), 1)
        self.assertEqual(print_task[0].state, TaskState.CANCELLED)
| 5,654 | Python | .py | 96 | 50.21875 | 101 | 0.691083 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
932 | TimerDurationTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerDurationTest.py | import time
from datetime import datetime, timedelta
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class TimerDurationTest(BpmnWorkflowTestCase):
    """A 0.25s duration timer should fire within the expected time window."""

    def setUp(self):
        self.script_engine = PythonScriptEngine(environment=TaskDataEnvironment({"timedelta": timedelta}))
        self.spec, self.subprocesses = self.load_workflow_spec('timer.bpmn', 'timer')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=self.script_engine)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self, save_restore=False):
        """Poll the waiting timer until it fires, then bound the elapsed time."""
        self.workflow.do_engine_steps()
        self.assertEqual(len(self.workflow.waiting_events()), 1)
        started = datetime.now()
        attempts = 0
        # The test diagram's timer is 0.25s; the loop's own 10-iteration
        # safety limit must not be reached if the timer fires on time.
        while attempts < 10 and len(self.workflow.get_tasks(state=TaskState.WAITING)) > 0:
            if save_restore:
                self.save_restore()
                # Restoring drops the custom engine, so re-attach it.
                self.workflow.script_engine = self.script_engine
            time.sleep(0.1)
            self.workflow.refresh_waiting_tasks()
            attempts += 1
        elapsed = datetime.now() - started
        self.assertTrue(elapsed < timedelta(seconds=.5))
        self.assertTrue(elapsed > timedelta(seconds=.2))
        self.assertEqual(len(self.workflow.waiting_events()), 0)
| 1,715 | Python | .py | 34 | 42.558824 | 106 | 0.707959 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
933 | MessagesTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MessagesTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class MessagesTest(BpmnWorkflowTestCase):
    """Message catch events ignore non-matching messages and fire on a match."""

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/*.bpmn20.xml',
            'sid-b0903a88-fe74-4f93-b912-47b815ea8d1c',
            False)

    def testRunThroughHappy(self):
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Messages')
        self.workflow.do_engine_steps()
        self.assertEqual([], self.workflow.get_tasks(state=TaskState.READY))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        # A non-matching message must not make any task READY.
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Wrong Message'), {}))
        self.assertEqual([], self.workflow.get_tasks(state=TaskState.READY))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual('Test Message', self.workflow.get_tasks(state=TaskState.READY)[0].task_spec.bpmn_name)
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughSaveAndRestore(self):
        # Same scenario, with a serialization round-trip before and after
        # the message is caught.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Messages')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual([], self.workflow.get_tasks(state=TaskState.READY))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Wrong Message'), {}))
        self.assertEqual([], self.workflow.get_tasks(state=TaskState.READY))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
934 | TimerDurationBoundaryTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryTest.py | import time
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class TimerDurationTest(BpmnWorkflowTestCase):
    """A timer boundary event should interrupt its subworkflow when it fires."""

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('boundary.bpmn', 'boundary_event')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self,save_restore = False):
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_next_task(state=TaskState.READY)
        ready_tasks.run()
        self.workflow.do_engine_steps()
        loopcount = 0
        # test bpmn has a timeout of .03s; we should terminate loop before that.
        while len(self.workflow.get_tasks(state=TaskState.WAITING)) == 1 and loopcount < 11:
            if save_restore:
                self.save_restore()
            time.sleep(0.01)
            self.assertEqual(len(self.workflow.get_tasks(state=TaskState.READY)), 1)
            self.workflow.refresh_waiting_tasks()
            self.workflow.do_engine_steps()
            loopcount += 1
        self.workflow.do_engine_steps()
        # The timer should have interrupted (cancelled) the subworkflow.
        subworkflow = self.workflow.get_next_task(spec_name='Subworkflow')
        self.assertEqual(subworkflow.state, TaskState.CANCELLED)
        # Complete whatever user tasks remain on the boundary-event path.
        ready_tasks = self.get_ready_user_tasks()
        while len(ready_tasks) > 0:
            ready_tasks[0].run()
            ready_tasks = self.get_ready_user_tasks()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        # Assure that the loopcount is less than 10, and the timer interrupt fired, rather
        # than allowing us to continue to loop the full 10 times.
        self.assertTrue(loopcount < 10)
| 1,942 | Python | .py | 40 | 39.775 | 97 | 0.679535 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
935 | UncaughtEscalationTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/UncaughtEscalationTest.py | import unittest
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class UncaughtEscalationTest(BpmnWorkflowTestCase):
    """An escalation with no matching catch event should surface via get_events()."""

    def test_uncaught_escalation(self):
        top_spec, sub_specs = self.load_workflow_spec('uncaught_escalation.bpmn', 'top_level')
        wf = BpmnWorkflow(top_spec, sub_specs)
        wf.do_engine_steps()
        self.assertTrue(wf.completed)
        # The uncaught escalation is reported as a workflow-level event.
        first_event = wf.get_events()[0]
        self.assertEqual(first_event.event_definition.code, 'escalation-1')
| 593 | Python | .py | 12 | 43.666667 | 97 | 0.770833 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
936 | StartEventSplitTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/StartEventSplitTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class StartEventSplitTest(BpmnWorkflowTestCase):
    """Two alternative start events: catching one should cancel the other."""

    def setUp(self):
        spec, subprocess_specs = self.load_workflow_spec('start_event_split.bpmn', 'main')
        self.workflow = BpmnWorkflow(spec)

    def test_start_event_split(self):
        self.actual_test()

    def test_start_event_split_save_restore(self):
        self.actual_test(True)

    def actual_test(self, save_restore=False):
        # Removed an unused `ready = self.workflow.get_next_task(...)`
        # assignment; get_next_task is a read-only lookup (used elsewhere in
        # this method purely for inspection), so dropping it changes nothing.
        self.run_until_input_required()
        if save_restore:
            self.save_restore()
        # Both start events wait until one of them receives its message.
        start_1 = self.workflow.get_next_task(spec_name='start_1')
        start_2 = self.workflow.get_next_task(spec_name='start_2')
        self.assertEqual(start_1.state, TaskState.WAITING)
        self.assertEqual(start_2.state, TaskState.WAITING)
        # Deliver message_1: start_1 should complete and start_2 be cancelled.
        message = BpmnEvent(MessageEventDefinition('message_1'))
        self.workflow.catch(message)
        self.run_until_input_required()
        self.assertEqual(start_1.state, TaskState.COMPLETED)
        self.assertEqual(start_2.state, TaskState.CANCELLED)
        any_task = self.workflow.get_next_task(spec_name='any_task')
        self.assertEqual(any_task.state, TaskState.READY)
        any_task.run()
        self.run_until_input_required()
        self.assertTrue(self.workflow.completed)
| 1,557 | Python | .py | 31 | 42.419355 | 90 | 0.716744 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
937 | CancelBoundaryEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/CancelBoundaryEventTest.py | from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'michaelc'
class CancelBoundaryTest(BpmnWorkflowTestCase):
    """The parser must reject a cancel boundary event in this diagram."""

    def testInvalidCancelEvent(self):
        # Context-manager form of the same assertRaises check.
        with self.assertRaises(ValidationException):
            self.load_workflow_spec('invalid_cancel.bpmn', 'Process_1dagb7t')
| 364 | Python | .py | 6 | 56.666667 | 113 | 0.832386 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
938 | MessageNonInterruptsSpTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MessageNonInterruptsSpTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class MessageNonInterruptsSpTest(BpmnWorkflowTestCase):
    """Non-interrupting message event attached to a subprocess.

    The 'Acknowledge SP Parallel Message' task can be completed before,
    between, or after the subprocess steps; each ordering is exercised
    with serialization round-trips along the way.
    """

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/*.bpmn20.xml',
            'sid-b6b1212d-76ea-4ced-888b-a99fbbbca575',
            False)

    def testRunThroughHappySaveAndRestore(self):
        # No message delivered: just the subprocess path.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something In a Subprocess')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_exclusive_step('Ack Subprocess Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageSaveAndRestore(self):
        # Acknowledge the message after the subprocess completes.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.do_next_named_step('Do Something In a Subprocess')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Ack Subprocess Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Acknowledge SP Parallel Message')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageOrder2SaveAndRestore(self):
        # Acknowledge the message between the two subprocess steps.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.do_next_named_step('Do Something In a Subprocess')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Acknowledge SP Parallel Message')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Ack Subprocess Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageOrder3SaveAndRestore(self):
        # Acknowledge the message before doing the subprocess work.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.do_next_named_step('Acknowledge SP Parallel Message')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Do Something In a Subprocess')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.do_next_named_step('Ack Subprocess Done')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
| 4,436 | Python | .py | 87 | 42.183908 | 82 | 0.696822 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
939 | CallActivityEscalationTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/CallActivityEscalationTest.py | import unittest
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = '[email protected]'
def on_ready_cb(workflow, task, completed_set):
    """Ready-event callback: re-attach tracking to any newly created children.

    In workflows that load a subworkflow, the newly loaded children will not
    have the tracking callbacks assigned yet.  Re-running track_task() for
    every child on each ready event makes sure new children are tracked too.
    (The original comment referred to an `on_reached_cb()` that does not
    exist here -- presumably a stale name from an older API.)
    """
    for child in task.children:
        track_task(child.task_spec, completed_set)
    return True
def on_complete_cb(workflow, task, completed_set):
    """Completed-event callback: remember which task specs have finished."""
    spec_name = task.task_spec.name
    completed_set.add(spec_name)
    return True
def track_task(task_spec, completed_set):
    """Attach (or re-attach) the tracking callbacks to one task spec.

    Disconnecting first keeps the callbacks from being registered twice
    when a spec is revisited.
    """
    bindings = (
        (task_spec.ready_event, on_ready_cb),
        (task_spec.completed_event, on_complete_cb),
    )
    for event, callback in bindings:
        if event.is_connected(callback):
            event.disconnect(callback)
        event.connect(callback, completed_set)
def track_workflow(wf_spec, completed_set):
    """Attach completion tracking to every task spec in *wf_spec*."""
    for spec in wf_spec.task_specs.values():
        track_task(spec, completed_set)
class CallActivityEscalationTest(BpmnWorkflowTestCase):
    """Check which end events fire when escalations are raised (or not)
    inside a call activity.

    ``track_workflow`` records the name of every completed task spec in
    ``completed_set``; each test then asserts membership of the expected
    end events.  Uses assertIn/assertNotIn/assertTrue instead of the
    original ``assertEqual(True, x in s)`` so failures report the actual
    missing/unexpected element.
    """

    def setUp(self):
        self.spec, subprocesses = self.load_workflow_spec('Test-Workflows/*.bpmn20.xml', 'CallActivity-Escalation-Test', False)
        self.workflow = BpmnWorkflow(self.spec, subprocesses)

    def testShouldEscalate(self):
        completed_set = set()
        track_workflow(self.spec, completed_set)
        for task in self.workflow.get_tasks(state=TaskState.READY):
            task.set_data(should_escalate=True)
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.run_all()
        self.assertTrue(self.workflow.completed)

        # Escalated: non-interrupting handlers run alongside the normal
        # path, while interrupting handlers replace it.
        self.assertIn('EndEvent_specific1_noninterrupting_normal', completed_set)
        self.assertIn('EndEvent_specific1_noninterrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_specific1_interrupting_normal', completed_set)
        self.assertIn('EndEvent_specific1_interrupting_escalated', completed_set)

        self.assertIn('EndEvent_specific2_noninterrupting_normal', completed_set)
        self.assertIn('EndEvent_specific2_noninterrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_specific2_noninterrupting_missingvariable', completed_set)
        self.assertNotIn('EndEvent_specific2_interrupting_normal', completed_set)
        self.assertIn('EndEvent_specific2_interrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_specific2_interrupting_missingvariable', completed_set)

        self.assertIn('EndEvent_general_noninterrupting_normal', completed_set)
        self.assertIn('EndEvent_general_noninterrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_general_interrupting_normal', completed_set)
        self.assertIn('EndEvent_general_interrupting_escalated', completed_set)

    def testShouldNotEscalate(self):
        completed_set = set()
        track_workflow(self.spec, completed_set)
        for task in self.workflow.get_tasks(state=TaskState.READY):
            task.set_data(should_escalate=False)
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.run_all()
        self.assertTrue(self.workflow.completed)

        # No escalation: only the normal paths complete.
        self.assertIn('EndEvent_specific1_noninterrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific1_noninterrupting_escalated', completed_set)
        self.assertIn('EndEvent_specific1_interrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific1_interrupting_escalated', completed_set)

        self.assertIn('EndEvent_specific2_noninterrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific2_noninterrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_specific2_noninterrupting_missingvariable', completed_set)
        self.assertIn('EndEvent_specific2_interrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific2_interrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_specific2_interrupting_missingvariable', completed_set)

        self.assertIn('EndEvent_general_noninterrupting_normal', completed_set)
        self.assertNotIn('EndEvent_general_noninterrupting_escalated', completed_set)
        self.assertIn('EndEvent_general_interrupting_normal', completed_set)
        self.assertNotIn('EndEvent_general_interrupting_escalated', completed_set)

    def testMissingVariable(self):
        # 'should_escalate' is never set, so the 'missingvariable' end
        # events and the general escalation handlers fire instead.
        completed_set = set()
        track_workflow(self.spec, completed_set)
        self.workflow.do_engine_steps()
        self.save_restore()
        self.workflow.run_all()
        self.assertTrue(self.workflow.completed)

        self.assertIn('EndEvent_specific1_noninterrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific1_noninterrupting_escalated', completed_set)
        self.assertIn('EndEvent_specific1_interrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific1_interrupting_escalated', completed_set)

        self.assertIn('EndEvent_specific2_noninterrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific2_noninterrupting_escalated', completed_set)
        self.assertIn('EndEvent_specific2_noninterrupting_missingvariable', completed_set)
        self.assertNotIn('EndEvent_specific2_interrupting_normal', completed_set)
        self.assertNotIn('EndEvent_specific2_interrupting_escalated', completed_set)
        self.assertIn('EndEvent_specific2_interrupting_missingvariable', completed_set)

        self.assertIn('EndEvent_general_noninterrupting_normal', completed_set)
        self.assertIn('EndEvent_general_noninterrupting_escalated', completed_set)
        self.assertNotIn('EndEvent_general_interrupting_normal', completed_set)
        self.assertIn('EndEvent_general_interrupting_escalated', completed_set)
class CallActivityEscalationTest(BpmnWorkflowTestCase):
    pass
class CallActivityEscalationWithoutSaveRestoreTest(CallActivityEscalationTest):
    # Runs the same scenarios as the parent class, but with serialization
    # round-trips turned off, so escalation behavior is verified on the
    # live (never-restored) workflow as well.
    def save_restore(self):
        pass  # disabling save_restore for this test case
| 6,759 | Python | .py | 100 | 59.74 | 127 | 0.752039 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
940 | MultipleCatchEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MultipleCatchEventTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class MultipleStartEventTest(BpmnWorkflowTestCase):
    """A process with multiple message start events should start when any
    one of its start messages is received."""

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('multiple-start.bpmn', 'main')
        self.workflow = BpmnWorkflow(self.spec)

    def testMultipleStartEvent(self):
        self.actual_test()

    def testMultipleStartEventSaveRestore(self):
        self.actual_test(True)

    def actual_test(self, save_restore=False):
        self.workflow.do_engine_steps()
        waiting_tasks = self.workflow.get_tasks(state=TaskState.WAITING)
        if save_restore:
            self.save_restore()
        # The start event should be waiting
        # NOTE(review): waiting_tasks was captured before save_restore, so
        # these assertions inspect the pre-restore task objects — confirm
        # that is intentional.
        self.assertEqual(len(waiting_tasks), 1)
        self.assertEqual(waiting_tasks[0].task_spec.name, 'StartEvent_1')
        # Delivering one of the start messages is enough to wake the
        # start event and move the process forward.
        self.workflow.catch(BpmnEvent(MessageEventDefinition('message_1'), {}))
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        # Now the first task should be ready
        ready_tasks = self.get_ready_user_tasks()
        self.assertEqual(len(ready_tasks), 1)
        self.assertEqual(ready_tasks[0].task_spec.name, 'any_task')
class ParallelStartEventTest(BpmnWorkflowTestCase):
    """A process with a parallel (multiple) start event only starts after
    *all* of its start messages have been received."""

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec('multiple-start-parallel.bpmn', 'main')
        self.workflow = BpmnWorkflow(self.spec)

    def testParallelStartEvent(self):
        self.actual_test()

    def testParallelStartEventSaveRestore(self):
        self.actual_test(True)

    def actual_test(self, save_restore=False):
        self.workflow.do_engine_steps()
        waiting_tasks = self.workflow.get_tasks(state=TaskState.WAITING)
        if save_restore:
            self.save_restore()
        # The start event should be waiting
        # NOTE(review): waiting_tasks was captured before save_restore —
        # these assertions inspect the pre-restore task objects.
        self.assertEqual(len(waiting_tasks), 1)
        self.assertEqual(waiting_tasks[0].task_spec.name, 'StartEvent_1')
        # Deliver the first of the two required messages.
        self.workflow.catch(BpmnEvent(MessageEventDefinition('message_1'), {}))
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        # It should still be waiting because it has to receive both messages
        waiting_tasks = self.workflow.get_tasks(state=TaskState.WAITING)
        self.assertEqual(len(waiting_tasks), 1)
        self.assertEqual(waiting_tasks[0].task_spec.name, 'StartEvent_1')
        # Deliver the second message; the event is now satisfied.
        self.workflow.catch(BpmnEvent(MessageEventDefinition('message_2'), {}))
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        # Now the first task should be ready
        ready_tasks = self.get_ready_user_tasks()
        self.assertEqual(len(ready_tasks), 1)
        self.assertEqual(ready_tasks[0].task_spec.name, 'any_task')
| 2,941 | Python | .py | 57 | 43.280702 | 102 | 0.705717 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
941 | TimerDurationBoundaryOnTaskTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimerDurationBoundaryOnTaskTest.py | import time
from datetime import timedelta
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'kellym'
class TimerDurationTest(BpmnWorkflowTestCase):
    """A non-interrupting boundary timer on a task: the timer fires while
    the task is active, but the task itself must remain runnable.

    NOTE(review): relies on a real one-second sleep to let the timer
    expire, so this test is wall-clock dependent.
    """

    def setUp(self):
        # Scripts in the diagram reference `timedelta`, so expose it in
        # the script engine's environment.
        self.script_engine = PythonScriptEngine(environment=TaskDataEnvironment({"timedelta": timedelta}))
        self.spec, self.subprocesses = self.load_workflow_spec('boundary_timer_on_task.bpmn', 'test_timer')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses, script_engine=self.script_engine)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self,save_restore = False):
        self.workflow.do_engine_steps()
        if save_restore:
            self.save_restore()
            # The script engine is not persisted, so re-attach it after a restore.
            self.workflow.script_engine = self.script_engine
        time.sleep(1)
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        # Make sure the timer got called
        self.assertEqual(self.workflow.last_task.data['timer_called'],True)
        # Make sure the task can still be called.
        task = self.get_ready_user_tasks()[0]
        task.run()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
| 1,444 | Python | .py | 30 | 40.833333 | 107 | 0.721627 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
942 | TimeDurationParseTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/TimeDurationParseTest.py | import unittest
from datetime import datetime
from SpiffWorkflow.bpmn.specs.event_definitions.timer import TimerEventDefinition
class TimeDurationParseTest(unittest.TestCase):
    """Non-exhaustive ISO-8601 durations, but hopefully covers basic support.

    Also checks start-/end-anchored timedelta calculations across leap
    years and month boundaries.
    """

    def test_parse_duration(self):
        # (duration string, expected parse result)
        valid = [
            ("P1Y6M1DT1H1M1S", {'years': 1, 'months': 6, 'days': 1, 'hours': 1, 'minutes': 1, 'seconds': 1 }),      # everything
            ("P1Y6M1DT1H1M1.5S", {'years': 1, 'months': 6, 'days': 1, 'hours': 1, 'minutes': 1, 'seconds': 1.5 }),  # fractional seconds
            ("P1YT1H1M1S", {'years': 1, 'hours': 1, 'minutes': 1, 'seconds': 1 }),                                  # minutes but no month
            ("P1MT1H", {'months': 1, 'hours':1}),                                                                   # months but no minutes
            ("P4W", {'weeks': 4}),                                                                                  # weeks
            ("P1Y6M1D", {'years': 1, 'months': 6, 'days': 1}),                                                      # no time
            ("PT1H1M1S", {'hours': 1,'minutes': 1,'seconds': 1}),                                                   # time only
            ("PT1.5H", {'hours': 1.5}),                                                                             # alt fractional
            ("T1,5H", {'hours': 1.5}),                                                                              # fractional with comma
            ("PDT1H1M1S", {'hours': 1, 'minutes': 1, 'seconds': 1}),                                                # empty spec
            ("PYMDT1H1M1S", {'hours': 1, 'minutes': 1, 'seconds': 1}),                                              # multiple empty
        ]
        for duration, parsed_duration in valid:
            result = TimerEventDefinition.parse_iso_duration(duration)
            self.assertDictEqual(result, parsed_duration)

        invalid = [
            "PT1.5H30S",    # fractional duration with subsequent non-fractional
            "PT1,5H30S",    # with comma
            "P1H1M1S",      # missing 't'
            "P1DT",         # 't' without time spec
            "P1W1D",        # conflicting day specs
            "PT1H1M1",      # trailing values
        ]
        for duration in invalid:
            self.assertRaises(Exception, TimerEventDefinition.parse_iso_duration, duration)

    def test_calculate_timedelta_from_start(self):
        start, one_day = datetime.fromisoformat("2023-01-01"), 24 * 3600
        # Leap years: 2024 is a leap year, so the two-year delta gains a day.
        self.assertEqual(TimerEventDefinition.get_timedelta_from_start({'years': 1}, start).total_seconds(), 365 * one_day)
        self.assertEqual(TimerEventDefinition.get_timedelta_from_start({'years': 2}, start).total_seconds(), (365 + 366) * one_day)
        # Increment by month does not change day
        for month in range(1, 13):
            dt = start + TimerEventDefinition.get_timedelta_from_start({'months': month}, start)
            self.assertEqual(dt.day, 1)

    def test_calculate_timedelta_from_end(self):
        end, one_day = datetime.fromisoformat("2025-01-01"), 24 * 3600
        # Leap years: counting back from 2025 crosses leap year 2024 first.
        self.assertEqual(TimerEventDefinition.get_timedelta_from_end({'years': 1}, end).total_seconds(), 366 * one_day)
        self.assertEqual(TimerEventDefinition.get_timedelta_from_end({'years': 2}, end).total_seconds(), (365 + 366) * one_day)
        # Decrement by month does not change day.
        # (A dead pre-loop assignment of ``dt`` with {'months': 11} was
        # removed: it was immediately overwritten on the loop's first pass.)
        for month in range(1, 13):
            dt = end - TimerEventDefinition.get_timedelta_from_end({'months': month}, end)
            self.assertEqual(dt.day, 1)
| 3,733 | Python | .py | 51 | 63.078431 | 139 | 0.520011 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
943 | MessageInterruptsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/MessageInterruptsTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class MessageInterruptsTest(BpmnWorkflowTestCase):
    """A long-running task that can be interrupted by a 'Test Message'
    event.  Each scenario is exercised twice: plain, and with
    serialize/restore round-trips (`save_restore`) between steps.
    """

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/*.bpmn20.xml',
            'sid-b0903a88-fe74-4f93-b912-47b815ea8d1c',
            False)

    def testRunThroughHappySaveAndRestore(self):
        # Happy path: the long-running task finishes before any message arrives.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step('Select Test', choice='Message Interrupts')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something That Takes A Long Time')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageInterruptSaveAndRestore(self):
        # The message arrives while the long-running task is active and
        # interrupts it; the acknowledge step then finishes the workflow.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.save_restore()
        self.do_next_exclusive_step('Select Test', choice='Message Interrupts')
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_exclusive_step('Acknowledge Interrupt Message')
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertTrue(self.workflow.completed)

    def testRunThroughHappy(self):
        # Same as the save/restore happy path, without serialization.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Message Interrupts')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.do_next_exclusive_step('Do Something That Takes A Long Time')
        self.workflow.do_engine_steps()
        self.assertEqual(0, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughMessageInterrupt(self):
        # Same as the save/restore interrupt path, without serialization.
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        self.do_next_exclusive_step('Select Test', choice='Message Interrupts')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.workflow.catch(BpmnEvent(MessageEventDefinition('Test Message'), {}))
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_exclusive_step('Acknowledge Interrupt Message')
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
| 4,203 | Python | .py | 73 | 48.890411 | 82 | 0.713763 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
944 | ConditionalEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/ConditionalEventTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
class ConditionalEventTest(BpmnWorkflowTestCase):
    """Conditional events: a catch event that becomes READY when a data
    object condition evaluates to True, both as an intermediate event and
    as a boundary event."""

    def testIntermediateEvent(self):
        spec, subprocesses = self.load_workflow_spec('conditional_event.bpmn', 'intermediate')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        # I don't want to complicate the diagram with extra tasks just for initializing this value
        self.workflow.data_objects['task_a_done'] = False
        self.workflow.do_engine_steps()
        b = self.workflow.get_next_task(spec_name='task_b')
        b.run()
        self.save_restore()
        event = self.workflow.get_next_task(spec_name='event_1')
        # The event waits for task_a_done to become True
        self.assertEqual(event.state, TaskState.WAITING)
        a = self.workflow.get_next_task(spec_name='task_a')
        a.data['task_a_done'] = True
        a.run()
        self.save_restore()
        # Completion of A results in event being updated
        self.assertEqual(event.state, TaskState.READY)
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testBoundaryEvent(self):
        spec, subprocesses = self.load_workflow_spec('conditional_event.bpmn', 'boundary')
        self.workflow = BpmnWorkflow(spec, subprocesses)
        # Initialize the condition's data object (see note in the test above).
        self.workflow.data_objects['task_c_done'] = False
        self.workflow.do_engine_steps()
        c = self.workflow.get_next_task(spec_name='task_c')
        c.data['task_c_done'] = True
        event = self.workflow.get_next_task(spec_name='event_2')
        # The boundary event stays WAITING until its host task completes
        # with the condition satisfied.
        self.assertEqual(event.state, TaskState.WAITING)
        c.run()
        self.assertEqual(event.state, TaskState.READY)
        self.workflow.do_engine_steps()
        # The boundary event interrupted its host, so task_d is cancelled.
        d = self.workflow.get_next_task(spec_name='task_d')
        self.assertEqual(d.state, TaskState.CANCELLED)
        self.assertTrue(self.workflow.completed)
| 1,975 | Python | .py | 39 | 42.538462 | 98 | 0.689798 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
945 | ActionManagementTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/events/ActionManagementTest.py | import datetime
import time
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from ..BpmnWorkflowTestCase import BpmnWorkflowTestCase
__author__ = 'matth'
class ActionManagementTest(BpmnWorkflowTestCase):
    """Action-management workflow with start-time and finish-time timers.

    NOTE(review): these tests sleep against real wall-clock deltas below,
    so they can be sensitive to machine load.
    """
    # Seconds from 'now' used as the action's start and finish deadlines.
    START_TIME_DELTA=0.05
    FINISH_TIME_DELTA=0.10

    def now_plus_seconds(self, seconds):
        # ISO-formatted timestamp *seconds* from now, fed to the timers.
        return (datetime.datetime.now() + datetime.timedelta(seconds=seconds)).isoformat()

    def setUp(self):
        self.spec, self.subprocesses = self.load_workflow_spec(
            'Test-Workflows/Action-Management.bpmn20.xml',
            'sid-efb89bb6-299a-4dc4-a50a-4286ec490604')
        self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
        start_time = self.now_plus_seconds(self.START_TIME_DELTA)
        finish_time = self.now_plus_seconds(self.FINISH_TIME_DELTA)
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        # Seed the first ready task with the two deadline timestamps.
        self.workflow.get_tasks(state=TaskState.READY)[0].set_data(start_time=start_time, finish_time=finish_time)

    def testRunThroughHappy(self):
        self.do_next_exclusive_step("Review Action", choice='Approve')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual('NEW ACTION', self.workflow.get_tasks(state=TaskState.READY)[0].get_data('script_output'))
        self.assertEqual('Cancel Action (if necessary)', self.workflow.get_tasks(state=TaskState.READY)[0].task_spec.bpmn_name)
        # Let the start-time timer expire.
        time.sleep(self.START_TIME_DELTA)
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step("Start Work")
        self.workflow.do_engine_steps()
        self.do_next_named_step("Complete Work", choice="Done")
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughOverdue(self):
        self.do_next_exclusive_step("Review Action", choice='Approve')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.assertEqual('Cancel Action (if necessary)', self.workflow.get_tasks(state=TaskState.READY)[0].task_spec.bpmn_name)
        # Let the start-time timer expire.
        time.sleep(self.START_TIME_DELTA)
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step("Start Work")
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual('Finish Time', self.workflow.get_next_task(state=TaskState.WAITING).task_spec.bpmn_name)
        # Let the finish-time deadline pass so the overdue escalation fires.
        time.sleep(self.FINISH_TIME_DELTA)
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertNotEqual('Finish Time', self.workflow.get_tasks(state=TaskState.WAITING)[0].task_spec.bpmn_name)
        # Exactly one 'Overdue Escalation' task must exist and have completed.
        overdue_escalation_task = [
            t for t in self.workflow.get_tasks() if t.task_spec.bpmn_name == 'Overdue Escalation']
        self.assertEqual(1, len(overdue_escalation_task))
        overdue_escalation_task = overdue_escalation_task[0]
        self.assertEqual(TaskState.COMPLETED, overdue_escalation_task.state)
        self.assertEqual('ACTION OVERDUE', overdue_escalation_task.get_data('script_output'))
        self.do_next_named_step("Complete Work", choice="Done")
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughCancel(self):
        # Cancelling at review ends the workflow immediately.
        self.do_next_exclusive_step("Review Action", choice='Cancel')
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)

    def testRunThroughCancelAfterApproved(self):
        self.do_next_exclusive_step("Review Action", choice='Approve')
        self.workflow.do_engine_steps()
        self.do_next_named_step("Cancel Action (if necessary)")
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertEqual('ACTION CANCELLED', self.workflow.get_data('script_output'))

    def testRunThroughCancelAfterWorkStarted(self):
        self.do_next_exclusive_step("Review Action", choice='Approve')
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.READY)))
        # Let the start-time timer expire before starting work.
        time.sleep(self.START_TIME_DELTA)
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.WAITING)))
        self.assertEqual(1, len(self.workflow.get_tasks(state=TaskState.STARTED)))
        self.assertEqual(2, len(self.workflow.get_tasks(state=TaskState.READY)))
        self.do_next_named_step("Start Work")
        self.workflow.do_engine_steps()
        self.do_next_named_step("Cancel Action (if necessary)")
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertEqual('ACTION CANCELLED', self.workflow.get_data('script_output'))
| 6,009 | Python | .py | 98 | 52.795918 | 127 | 0.712487 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
946 | BaseTestCase.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/serializer/BaseTestCase.py | import unittest
import os
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.parser import BpmnParser
from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
class BaseTestCase(unittest.TestCase):
    """Shared fixture for serializer tests.

    Loads BPMN specs and stored serializations from DATA_DIR, and builds
    a fresh 'random_fact' workflow plus a serializer in setUp.
    """
    SERIALIZER_VERSION = "100.1.ANY"
    DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')

    def get_ready_user_tasks(self, lane=None):
        """Return READY manual tasks, optionally restricted to *lane*."""
        return self.workflow.get_tasks(state=TaskState.READY, manual=True, lane=lane)

    def load_workflow_spec(self, filename, process_name):
        """Parse BPMN files matching *filename* (a glob) and return
        (top-level spec, subprocess specs) for *process_name*."""
        parser = BpmnParser()
        parser.add_bpmn_files_by_glob(os.path.join(self.DATA_DIR, filename))
        top_level_spec = parser.get_spec(process_name)
        subprocesses = parser.get_subprocess_specs(process_name)
        return top_level_spec, subprocesses

    def deserialize_workflow(self, filename):
        """Load a stored serialization from the data directory."""
        fn = os.path.join(self.DATA_DIR, 'serialization', filename)
        # Serialized workflows are JSON, which is UTF-8; be explicit so
        # the read does not depend on the platform's default encoding.
        with open(fn, encoding='utf-8') as fh:
            return self.serializer.deserialize_json(fh.read())

    def setUp(self):
        # Zero-argument super() is the Python 3 idiom.
        super().setUp()
        wf_spec_converter = BpmnWorkflowSerializer.configure()
        self.serializer = BpmnWorkflowSerializer(wf_spec_converter, version=self.SERIALIZER_VERSION)
        spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact')
        self.workflow = BpmnWorkflow(spec, subprocesses)
| 1,424 | Python | .py | 27 | 46.259259 | 100 | 0.732901 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
947 | BpmnWorkflowSerializerTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/serializer/BpmnWorkflowSerializerTest.py | import unittest
import os
import json
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine
from .BaseTestCase import BaseTestCase
class BpmnWorkflowSerializerTest(BaseTestCase):
    """Round-trip tests for BpmnWorkflowSerializer's JSON format.

    Fix: several methods previously bound a local variable named ``json``,
    shadowing the imported ``json`` module within those methods; the
    locals are renamed to ``serialized``.
    """
    SERIALIZER_VERSION = "100.1.ANY"
    DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')

    def testSerializeWorkflowSpec(self):
        # Serialize -> deserialize -> serialize must be stable.
        spec_serialized = self.serializer.serialize_json(self.workflow)
        result = self.serializer.deserialize_json(spec_serialized)
        spec_serialized2 = self.serializer.serialize_json(result)
        self.assertEqual(spec_serialized, spec_serialized2)

    def testSerializeWorkflowSpecWithGzip(self):
        # Same round-trip stability, with gzip compression enabled.
        spec_serialized = self.serializer.serialize_json(self.workflow, use_gzip=True)
        result = self.serializer.deserialize_json(spec_serialized, use_gzip=True)
        spec_serialized2 = self.serializer.serialize_json(result, use_gzip=True)
        self.assertEqual(spec_serialized, spec_serialized2)

    # NOTE: method name contains typos ('Serlialize', 'Perserves'); kept
    # unchanged so existing test-selection filters keep working.
    def testSerlializePerservesVersion(self):
        spec_serialized = self.serializer.serialize_json(self.workflow)
        version = self.serializer.get_version(spec_serialized)
        self.assertEqual(version, self.SERIALIZER_VERSION)

    def testSerializeWorkflow(self):
        # The serializer's output must be valid JSON.
        serialized = self.serializer.serialize_json(self.workflow)
        json.loads(serialized)

    def testSerializeWorkflowCustomJSONEncoderDecoder(self):
        class MyCls:
            a = 1
            def to_dict(self):
                return {'a': 1, 'my_type': 'mycls'}
            @classmethod
            def from_dict(cls, data):
                return MyCls()

        class MyJsonEncoder(json.JSONEncoder):
            def default(self, z):
                if isinstance(z, MyCls):
                    return z.to_dict()
                return super().default(z)

        class MyJsonDecoder(json.JSONDecoder):
            classes = {'mycls': MyCls}
            def __init__(self, *args, **kwargs):
                super().__init__(object_hook=self.object_hook, *args, **kwargs)
            def object_hook(self, z):
                if 'my_type' in z and z['my_type'] in self.classes:
                    return self.classes[z['my_type']].from_dict(z)
                return z

        unserializable = MyCls()
        a_task_spec = self.workflow.spec.task_specs[list(self.workflow.spec.task_specs)[0]]
        a_task = self.workflow.get_tasks(spec_name=a_task_spec.name)[0]
        a_task.data['jsonTest'] = unserializable
        try:
            # The default serializer cannot handle MyCls ...
            self.assertRaises(TypeError, self.serializer.serialize_json, self.workflow)
            # ... but one configured with the custom encoder/decoder can.
            wf_spec_converter = BpmnWorkflowSerializer.configure()
            custom_serializer = BpmnWorkflowSerializer(wf_spec_converter,
                                                       version=self.SERIALIZER_VERSION,
                                                       json_encoder_cls=MyJsonEncoder,
                                                       json_decoder_cls=MyJsonDecoder)
            serialized_workflow = custom_serializer.serialize_json(self.workflow)
        finally:
            # Always remove the unserializable object so later tests are unaffected.
            a_task.data.pop('jsonTest', None)
        serialized_task = [x for x in json.loads(serialized_workflow)['tasks'].values() if x['task_spec'] == a_task_spec.name][0]
        self.assertEqual(serialized_task['data']['jsonTest'], {'a': 1, 'my_type': 'mycls'})
        deserialized_workflow = custom_serializer.deserialize_json(serialized_workflow)
        deserialized_task = deserialized_workflow.get_tasks(spec_name=a_task_spec.name)[0]
        self.assertTrue(isinstance(deserialized_task.data['jsonTest'], MyCls))

    def testDeserializeWorkflow(self):
        self._compare_with_deserialized_copy(self.workflow)

    def testDeserializeActiveWorkflow(self):
        self.workflow.do_engine_steps()
        self._compare_with_deserialized_copy(self.workflow)

    def testDeserializeWithData(self):
        self.workflow.data["test"] = "my_test"
        serialized = self.serializer.serialize_json(self.workflow)
        wf2 = self.serializer.deserialize_json(serialized)
        self.assertEqual('my_test', wf2.get_data("test"))

    def testDeserializeWithDefaultScriptEngineClass(self):
        serialized = self.serializer.serialize_json(self.workflow)
        wf2 = self.serializer.deserialize_json(serialized)
        self.assertIsNotNone(self.workflow.script_engine)
        self.assertIsNotNone(wf2.script_engine)
        self.assertEqual(self.workflow.script_engine.__class__,
                         wf2.script_engine.__class__)

    @unittest.skip("Deserialize does not persist the script engine, Fix me.")
    def testDeserializeWithCustomScriptEngine(self):
        class CustomScriptEngine(PythonScriptEngine):
            pass

        self.workflow.script_engine = CustomScriptEngine()
        dct = self.serializer.serialize_json(self.workflow)
        wf2 = self.serializer.deserialize_json(dct)
        self.assertEqual(self.workflow.script_engine.__class__,
                         wf2.script_engine.__class__)

    def testDeserializeWithDataOnTask(self):
        self.workflow.do_engine_steps()
        user_task = self.get_ready_user_tasks()[0]
        user_task.data = {"test":"my_test"}
        self._compare_with_deserialized_copy(self.workflow)

    def testSerializeIgnoresCallable(self):
        # A function stored in task data must not appear in the serialized output.
        self.workflow.do_engine_steps()
        user_task = self.get_ready_user_tasks()[0]
        def f(n):
            return n + 1
        user_task.data = { 'f': f }
        task_id = str(user_task.id)
        dct = self.serializer.to_dict(self.workflow)
        self.assertNotIn('f', dct['tasks'][task_id]['data'])

    def testLastTaskIsSetAndWorksThroughRestore(self):
        self.workflow.do_engine_steps()
        serialized = self.serializer.serialize_json(self.workflow)
        wf2 = self.serializer.deserialize_json(serialized)
        self.assertIsNotNone(self.workflow.last_task)
        self.assertIsNotNone(wf2.last_task)
        self._compare_workflows(self.workflow, wf2)

    def test_serialize_workflow_where_script_task_includes_function(self):
        self.workflow.do_engine_steps()
        ready_tasks = self.get_ready_user_tasks()
        ready_tasks[0].run()
        self.workflow.do_engine_steps()
        self.serializer.serialize_json(self.workflow)
        assert self.workflow.completed
        assert 'y' in self.workflow.last_task.data
        assert 'x' not in self.workflow.last_task.data
        assert 'some_fun' not in self.workflow.last_task.data

    def _compare_with_deserialized_copy(self, wf):
        """Serialize *wf*, deserialize the result, and compare the two."""
        serialized = self.serializer.serialize_json(wf)
        wf2 = self.serializer.deserialize_json(serialized)
        self._compare_workflows(wf, wf2)

    def _compare_workflows(self, w1, w2):
        """Assert both workflows carry equal workflow- and per-task data."""
        self.assertIsInstance(w1, BpmnWorkflow)
        self.assertIsInstance(w2, BpmnWorkflow)
        self.assertEqual(w1.data, w2.data)
        for task in w1.get_tasks():
            w2_task = w2.get_task_from_id(task.id)
            self.assertIsNotNone(w2_task)
            self.assertEqual(task.data, w2_task.data)
| 7,163 | Python | .py | 136 | 42.051471 | 129 | 0.661138 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
948 | VersionMigrationTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/bpmn/serializer/VersionMigrationTest.py | import os
import time
from uuid import UUID
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from SpiffWorkflow.bpmn.serializer.exceptions import VersionMigrationError
from .BaseTestCase import BaseTestCase
class Version_1_0_Test(BaseTestCase):
    """Migration tests for workflows serialized with format version 1.0."""

    def test_convert_subprocess(self):
        # Serialization captured by NestedSubprocessTest at line 25, version 1.0.
        workflow = self.deserialize_workflow('v1.0.json')
        # The migrated workflow should be resumable from where it was saved.
        ready = workflow.get_tasks(state=TaskState.READY)
        self.assertEqual('Action3', ready[0].task_spec.bpmn_name)
        ready[0].run()
        workflow.do_engine_steps()
        # Two refresh/step cycles drain the nested subprocesses.
        for _ in range(2):
            workflow.refresh_waiting_tasks()
            workflow.do_engine_steps()
        self.assertTrue(workflow.completed)
class Version_1_1_Test(BaseTestCase):
    """Migration tests for workflows serialized with format version 1.1."""

    def test_timers(self):
        wf = self.deserialize_workflow('v1.1-timers.json')
        # The serialized scripts reference `time`, so expose it to the engine.
        wf.script_engine = PythonScriptEngine(environment=TaskDataEnvironment({"time": time}))
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        self.assertTrue(wf.completed)

    def test_convert_data_specs(self):
        wf = self.deserialize_workflow('v1.1-data.json')
        wf.do_engine_steps()
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        self.assertTrue(wf.completed)

    def test_convert_exclusive_gateway(self):
        wf = self.deserialize_workflow('v1.1-gateways.json')
        wf.do_engine_steps()
        task = wf.get_next_task(spec_name='Gateway_askQuestion')
        self.assertEqual(len(task.task_spec.cond_task_specs), 2)
        ready_task = wf.get_tasks(state=TaskState.READY, manual=True)[0]
        ready_task.data['NeedClarification'] = 'Yes'
        ready_task.run()
        wf.do_engine_steps()
        ready_task = wf.get_tasks(state=TaskState.READY, manual=True)[0]
        self.assertEqual(ready_task.task_spec.name, 'Activity_A2')

    def test_check_multiinstance(self):
        # MultiInstance tasks cannot be migrated from 1.1; expect a hard error.
        # (The return value is deliberately discarded -- deserialization raises.)
        with self.assertRaises(VersionMigrationError) as ctx:
            self.deserialize_workflow('v1.1-multi.json')
        self.assertEqual(ctx.exception.message, "This workflow cannot be migrated because it contains MultiInstance Tasks")

    def test_remove_loop_reset(self):
        wf = self.deserialize_workflow('v1.1-loop-reset.json')
        # Allow 3 seconds max to allow this test to complete (there are 20 loops with a 0.1s timer)
        end = time.time() + 3
        while not wf.completed and time.time() < end:
            wf.do_engine_steps()
            wf.refresh_waiting_tasks()
        self.assertTrue(wf.completed)
        self.assertEqual(wf.last_task.data['counter'], 20)

    def test_update_task_states(self):
        wf = self.deserialize_workflow('v1.1-task-states.json')
        start = wf.get_tasks(end_at_spec='Start')[0]
        self.assertEqual(start.state, TaskState.COMPLETED)
        signal = wf.get_next_task(spec_name='signal')
        self.assertEqual(signal.state, TaskState.CANCELLED)
        # Drain all ready tasks; the workflow should then be complete.
        ready_tasks = wf.get_tasks(state=TaskState.READY)
        while len(ready_tasks) > 0:
            ready_tasks[0].run()
            ready_tasks = wf.get_tasks(state=TaskState.READY)
        self.assertTrue(wf.completed)
class Version_1_2_Test(BaseTestCase):
    """Migration tests for workflows serialized with format version 1.2."""

    def test_remove_boundary_events(self):
        # Serialized while boundary events were still pending; after migration
        # the workflow must run to completion and cancel the boundary catches.
        wf = self.deserialize_workflow('v1.2-boundary-events.json')
        ready_tasks = wf.get_tasks(state=TaskState.READY)
        ready_tasks[0].set_data(**{'value': 'asdf'})
        ready_tasks[0].run()
        wf.do_engine_steps()
        ready_tasks = wf.get_tasks(state=TaskState.READY)
        ready_tasks[0].set_data(**{'quantity': 2})
        ready_tasks[0].run()
        wf.do_engine_steps()
        self.assertIn('value', wf.last_task.data)
        # Check that workflow and next task completed
        subprocess = wf.get_next_task(spec_name='Subprocess')
        self.assertEqual(subprocess.state, TaskState.COMPLETED)
        print_task = wf.get_next_task(spec_name="Activity_Print_Data")
        self.assertEqual(print_task.state, TaskState.COMPLETED)
        # Check that the boundary events were cancelled
        cancel_task = wf.get_next_task(spec_name="Catch_Cancel_Event")
        self.assertEqual(cancel_task.state, TaskState.CANCELLED)
        error_1_task = wf.get_next_task(spec_name="Catch_Error_1")
        self.assertEqual(error_1_task.state, TaskState.CANCELLED)
        error_none_task = wf.get_next_task(spec_name="Catch_Error_None")
        self.assertEqual(error_none_task.state, TaskState.CANCELLED)

    def test_remove_noninterrupting_boundary_events(self):
        # Same scenario but with non-interrupting events; tasks are referenced
        # by their generated sid-* spec names.
        wf = self.deserialize_workflow('v1.2-boundary-events-noninterrupting.json')
        wf.get_next_task(spec_name='sid-D3365C47-2FAE-4D17-98F4-E68B345E18CE').run()
        wf.do_engine_steps()
        self.assertEqual(1, len(wf.get_tasks(state=TaskState.READY)))
        self.assertEqual(3, len(wf.get_tasks(state=TaskState.WAITING)))
        wf.get_next_task(spec_name='sid-6FBBB56D-00CD-4C2B-9345-486986BB4992').run()
        wf.do_engine_steps()
        self.assertTrue(wf.completed)

    def test_update_data_objects(self):
        # Workflow source: serialized from DataObjectTest after the subprocess has been created
        wf = self.deserialize_workflow('v1.2-data-objects.json')
        # Check that the data objects were removed from the subprocess
        sp_task = wf.get_next_task(spec_name='subprocess')
        sp = wf.get_subprocess(sp_task)
        self.assertNotIn('obj_1', sp.data)
        self.assertNotIn('data_objects', sp.data)
        sp_spec = wf.subprocess_specs.get('subprocess')
        self.assertEqual(len(sp_spec.data_objects), 0)
        # Make sure we can complete the process as we did in the original test
        wf.do_engine_steps()
        ready_tasks = wf.get_tasks(state=TaskState.READY)
        self.assertEqual(ready_tasks[0].data['obj_1'], 'hello')
        ready_tasks[0].data['obj_1'] = 'hello again'
        ready_tasks[0].run()
        wf.do_engine_steps()
        # The data object is not in the task data
        self.assertNotIn('obj_1', sp_task.data)
        # The update should persist in the main process
        self.assertEqual(wf.data_objects['obj_1'], 'hello again')

    def test_update_nested_data_objects(self):
        wf = self.deserialize_workflow('v1.2-data-objects-nested.json')
        self.assertIn('top_level_data_object', wf.data_objects)
        self.assertNotIn('sub_level_data_object_two', wf.data)
        self.assertNotIn('sub_level_data_object_three', wf.data)
        # Locate the nested subprocess and call activity by their instance ids
        # (fixed UUIDs baked into the serialized fixture).
        process_sub = wf.subprocesses[UUID('270d76e0-c1fe-4add-b58e-d5a51214a37b')]
        call_sub = wf.subprocesses[UUID('d0c6a2d9-9a43-4ccd-b4e3-ea62872f15ed')]
        # Each data object should appear only in the spec that declares it...
        self.assertNotIn('top_level_data_object', process_sub.spec.data_objects)
        self.assertNotIn('top_level_data_object', call_sub.spec.data_objects)
        self.assertIn('sub_level_data_object_two', process_sub.spec.data_objects)
        self.assertNotIn('sub_level_data_object_two', call_sub.spec.data_objects)
        self.assertIn('sub_level_data_object_three', call_sub.spec.data_objects)
        self.assertNotIn('sub_level_data_object_three', process_sub.spec.data_objects)
        # ...and the same holds for the instance-level data object values.
        self.assertNotIn('top_level_data_object', process_sub.data_objects)
        self.assertNotIn('top_level_data_object', call_sub.data_objects)
        self.assertIn('sub_level_data_object_two', process_sub.data_objects)
        self.assertNotIn('sub_level_data_object_two', call_sub.data_objects)
        self.assertIn('sub_level_data_object_three', call_sub.data_objects)
        self.assertNotIn('sub_level_data_object_three', process_sub.data_objects)
class Version_1_3_Test(BaseTestCase):
    """Migration tests for workflows serialized with format version 1.3."""

    def test_update_mi_states(self):
        # Fixture was serialized mid-way through a parallel multi-instance task.
        wf = self.deserialize_workflow('v1.3-mi-states.json')
        any_task = wf.get_next_task(spec_name='any_task')
        task_info = any_task.task_spec.task_info(any_task)
        # instance_map maps instance number -> child task id (as a string);
        # used below to confirm each child kept its identity after migration.
        instance_map = task_info['instance_map']
        self.assertEqual(len(wf.get_tasks(state=TaskState.WAITING)), 0)
        ready_tasks = wf.get_tasks(state=TaskState.READY, manual=True)
        self.assertEqual(len(ready_tasks), 1)
        # Complete the child instances one at a time as they become ready.
        while len(ready_tasks) > 0:
            task = ready_tasks[0]
            task_info = task.task_spec.task_info(task)
            self.assertEqual(task.task_spec.name, 'any_task [child]')
            self.assertIn('input_item', task.data)
            self.assertEqual(instance_map[task_info['instance']], str(task.id))
            task.data['output_item'] = task.data['input_item'] * 2
            task.run()
            ready_tasks = wf.get_tasks(state=TaskState.READY, manual=True)
        wf.refresh_waiting_tasks()
        wf.do_engine_steps()
        # All three instances should be completed; none running or pending.
        any_task = wf.get_next_task(spec_name='any_task')
        task_info = any_task.task_spec.task_info(any_task)
        self.assertEqual(len(task_info['completed']), 3)
        self.assertEqual(len(task_info['running']), 0)
        self.assertEqual(len(task_info['future']), 0)
        self.assertTrue(wf.completed)
| 9,277 | Python | .py | 170 | 45.823529 | 127 | 0.674972 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
949 | CallActivityMessageTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/CallActivityMessageTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseTestCase import BaseTestCase
__author__ = 'essweine'
class CallActivityMessageTest(BaseTestCase):
    """Drive a parent process whose call activity communicates via messages."""

    def setUp(self):
        spec, subprocesses = self.load_collaboration('call_activity_with_message*.bpmn', 'Parent_Process')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testRunThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self, save_restore=False):
        # (task spec name, data to set) pairs, in the expected execution order:
        # two rejected plans, then an approved one.
        steps = [
            ('Activity_EnterPlan', {'plan_details': 'Bad'}),
            ('Activity_ApproveOrDeny', {'approved': 'No'}),
            ('Activity_EnterPlan', {'plan_details': 'Better'}),
            ('Activity_ApproveOrDeny', {'approved': 'No'}),
            ('Activity_EnterPlan', {'plan_details': 'Best'}),
            ('Activity_ApproveOrDeny', {'approved': 'Yes'}),
            ('Activity_EnablePlan', {'Done': 'OK!'}),
        ]
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        started_tasks = self.workflow.get_tasks(state=TaskState.STARTED)
        self.assertEqual(1, len(ready_tasks),'Expected to have one ready task')
        self.assertEqual(2, len(started_tasks), 'Expected to have two started tasks')
        for spec_name, data in steps:
            task = ready_tasks[0]
            self.assertEqual(task.task_spec.name, spec_name)
            task.set_data(**data)
            task.run()
            self.workflow.do_engine_steps()
            if save_restore:
                self.save_restore()
            ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(self.workflow.completed, True, 'Expected the workflow to be complete at this point')
        self.assertEqual(
            self.workflow.last_task.data,
            {'plan_details': 'Best', 'Approved': 'Yes', 'Done': 'OK!'}
        )
| 2,037 | Python | .py | 41 | 40.02439 | 109 | 0.63998 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
950 | ResetTokenParallelTaskCountTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py | from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
__author__ = 'kellym'
class ResetTokenParallelTaskCountTest(BaseTestCase):
    """Resetting the token must not change the overall task count.

    Added when we discovered the task count growing exponentially in some
    cases after a token reset.
    """

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('token_trial_parallel_simple.bpmn', 'token_trial_parallel_simple')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testRunThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self, save_restore=False):
        expected_count = 9
        # Start the workflow and confirm the initial task count.
        self.workflow.do_engine_steps()
        self.assertEqual(expected_count, len(self.workflow.get_tasks()))
        # Have the exclusive gateway bypass the parallel section; the count
        # must stay the same.
        data = {'skipParallel': True}
        first_task = self.get_ready_user_tasks()[0]
        first_task.data = data
        self.workflow.run_task_from_id(first_task.id)
        self.assertEqual(expected_count, len(self.workflow.get_tasks()))
        # Rewind the token to the first user task: still the same count, and
        # exactly one user task ready again.
        first_task.reset_branch(data)
        self.assertEqual(expected_count, len(self.workflow.get_tasks()))
        self.assertEqual(1, len(self.get_ready_user_tasks()))
| 1,615 | Python | .py | 32 | 43.03125 | 119 | 0.701208 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
951 | ResetTokenParallelMatrixTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py | import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
__author__ = 'kellym'
class ResetTokenTestParallelMatrix(BaseTestCase):
    """The example bpmn diagram tests both a set cardinality from user input
    as well as looping over an existing array."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('token_trial_parallel_matrix.bpmn', 'token')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testRunThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def testRunThroughHappyAlt(self):
        self.actual_test2(save_restore=False)

    def testRunThroughSaveRestoreAlt(self):
        self.actual_test2(save_restore=True)

    def _run_steps(self, steps, save_restore, capture_task=None):
        """Run each (taskname, formvar, answer) step in order.

        For every step, assert the next ready user task matches *taskname*,
        set {formvar: answer} on it, run it, and step the engine (optionally
        doing a save/restore cycle).  Returns the id of the first ready task
        whose spec name equals *capture_task* (captured before it is run),
        or None if never seen.
        """
        captured_id = None
        for taskname, formvar, answer in steps:
            task = self.get_ready_user_tasks()[0]
            if captured_id is None and taskname == capture_task:
                captured_id = task.id
            self.assertEqual(taskname, task.task_spec.name)
            task.set_data(**{formvar: answer})
            self.workflow.run_task_from_id(task.id)
            self.workflow.do_engine_steps()
            if save_restore:
                self.save_restore()
        return captured_id

    def actual_test(self, save_restore=False, reset_data=False):
        """
        Test a complicated parallel matrix, complete the matrix and
        Reset somewhere in the middle. It should complete the row that we
        Reset to, and retain all previous answers.
        """
        self.workflow.do_engine_steps()
        first_pass = [
            ('First', 'First', 'Yes'),
            ('FormA1', 'A1', 'xa1'),
            ('FormA2', 'A2', 'xa2'),
            ('FormA3', 'A3', 'xa3'),
            ('FormB1', 'B1', 'xb1'),
            ('FormB2', 'B2', 'xb2'),
            ('FormB3', 'B3', 'xb3'),
            ('FormC1', 'C1', 'xc1'),
            ('FormC2', 'C2', 'xc2'),
            ('FormC3', 'C3', 'xc3'),
        ]
        firsttaskid = self._run_steps(first_pass, save_restore, capture_task='FormB2')
        self.workflow.reset_from_task_id(firsttaskid)
        # NB - this won't test random access
        second_pass = [
            ('FormB2', 'B2', 'b2'),
            ('FormB3', 'B3', 'b3'),
            ('FormD', 'D', 'd'),
        ]
        self._run_steps(second_pass, save_restore)
        self.assertTrue(self.workflow.completed)
        self.assertEqual({'First': 'Yes',
                          'A1': 'xa1',
                          'A2': 'xa2',
                          'A3': 'xa3',
                          'B1': 'xb1',
                          'B2': 'b2',
                          'B3': 'b3',
                          'C1': 'xc1',
                          'C2': 'xc2',
                          'C3': 'xc3',
                          'D': 'd'},
                         self.workflow.last_task.data)

    def actual_test2(self, save_restore=False, reset_data=False):
        """
        Test a complicated parallel matrix,
        Complete several items in the parallel matrix, but do not complete it,
        Reset to a previous version on another branch of the parallel, it should
        complete that branch and then pick up where we left off.
        Also, after we reset the branch, there should then be three tasks ready,
        A2,B3,and C1
        """
        self.workflow.do_engine_steps()
        first_pass = [
            ('First', 'First', 'Yes'),
            ('FormA1', 'A1', 'xa1'),
            ('FormA2', 'A2', 'xa2'),
            ('FormA3', 'A3', 'xa3'),
            ('FormB1', 'B1', 'xb1'),
            ('FormB2', 'B2', 'xb2'),
        ]
        firsttaskid = self._run_steps(first_pass, save_restore, capture_task='FormA2')
        self.workflow.reset_from_task_id(firsttaskid)
        # After the reset, all three branches have a ready task again.
        readytasks = [t.task_spec.name for t in self.get_ready_user_tasks()]
        self.assertEqual(readytasks, ['FormA2', 'FormB3', 'FormC1'])
        # NB - this won't test random access
        second_pass = [
            ('FormA2', 'A2', 'a2'),
            ('FormA3', 'A3', 'a3'),
            ('FormB3', 'B3', 'b3'),
            ('FormC1', 'C1', 'c1'),
            ('FormC2', 'C2', 'c2'),
            ('FormC3', 'C3', 'c3'),
            ('FormD', 'D', 'd'),
        ]
        self._run_steps(second_pass, save_restore)
        self.assertTrue(self.workflow.completed)
        self.assertEqual({'First': 'Yes',
                          'A1': 'xa1',
                          'A2': 'a2',
                          'A3': 'a3',
                          'B1': 'xb1',
                          'B2': 'xb2',
                          'B3': 'b3',
                          'C1': 'c1',
                          'C2': 'c2',
                          'C3': 'c3',
                          'D': 'd'},
                         self.workflow.last_task.data)
| 7,785 | Python | .py | 187 | 25.930481 | 97 | 0.451928 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
952 | MessageBoundaryEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/MessageBoundaryEventTest.py | import time
from datetime import timedelta
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from .BaseTestCase import BaseTestCase
__author__ = 'kellym'
class MessageBoundaryTest(BaseTestCase):
    """Message boundary event exercised from a collaboration with a timer."""

    def setUp(self):
        # The diagram's scripts reference `timedelta`, so it must be available
        # in the script engine environment.
        script_engine = PythonScriptEngine(environment=TaskDataEnvironment({"timedelta": timedelta}))
        spec, subprocess_specs = self.load_collaboration('MessageBoundary.bpmn', 'Collaboration_0fh00ao')
        self.workflow = BpmnWorkflow(spec, subprocess_specs, script_engine=script_engine)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self,save_restore = False):
        # Answer 'No' twice, then 'Yes' on the third pass; the workflow is
        # expected to complete after the final answer.
        steps = [
            ('Activity_Interrupt', {'interrupt_task':'No'}),
            ('Activity_Interrupt', {'interrupt_task': 'No'}),
            ('Activity_Interrupt', {'interrupt_task': 'Yes'}),
        ]
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(2, len(ready_tasks),'Expected to have two ready tasks')
        for step in steps:
            # Only run the ready task matching this step's spec name.
            for task in ready_tasks:
                if task.task_spec.name == step[0]:
                    task.set_data(**step[1])
                    self.workflow.run_task_from_id(task.id)
            self.workflow.do_engine_steps()
            # Brief pause so the timer can elapse before refreshing.
            time.sleep(.01)
            self.workflow.refresh_waiting_tasks()
            if save_restore:
                self.save_restore()
            ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        time.sleep(.01)
        self.workflow.refresh_waiting_tasks()
        self.workflow.do_engine_steps()
        self.assertEqual(self.workflow.completed, True, 'Expected the workflow to be complete at this point')
| 1,970 | Python | .py | 40 | 39.675 | 109 | 0.665451 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
953 | BaseTestCase.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/BaseTestCase.py | import os
from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
from SpiffWorkflow.camunda.serializer import DEFAULT_CONFIG
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase
# Shared serializer registry, configured once for all camunda test cases.
registry = BpmnWorkflowSerializer.configure(DEFAULT_CONFIG)
__author__ = 'danfunk'
class BaseTestCase(BpmnWorkflowTestCase):
    """Provides some basic tools for loading up and parsing camunda BPMN files."""

    serializer = BpmnWorkflowSerializer(registry)

    def get_parser(self, filename, dmn_filename=None):
        """Return a CamundaParser loaded with the given BPMN (and DMN) globs."""
        parser = CamundaParser()
        data_dir = os.path.join(os.path.dirname(__file__), 'data')
        parser.add_bpmn_files_by_glob(os.path.join(data_dir, filename))
        if dmn_filename is not None:
            parser.add_dmn_files_by_glob(os.path.join(data_dir, 'dmn', dmn_filename))
        return parser

    def load_workflow_spec(self, filename, process_name, dmn_filename=None):
        """Return (top-level spec, subprocess specs) for *process_name*."""
        parser = self.get_parser(filename, dmn_filename)
        return parser.get_spec(process_name), parser.get_subprocess_specs(process_name)

    def load_collaboration(self, filename, collaboration_name, dmn_filename=None):
        """Return the parsed collaboration named *collaboration_name*."""
        return self.get_parser(filename, dmn_filename).get_collaboration(collaboration_name)

    def reload_save_restore(self):
        # Alias kept for subclasses that expect the reload-flavored name.
        self.save_restore()
| 1,504 | Python | .py | 28 | 47.214286 | 86 | 0.737381 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
954 | UserTaskParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/UserTaskParserTest.py | from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
class UserTaskParserTest(BaseTestCase):
    """Checks that camunda form extensions are parsed onto user task specs."""

    def setUp(self):
        self.spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact')

    def _select_type_form(self):
        """Return the form attached to the 'Task_User_Select_Type' spec."""
        return self.spec.task_specs['Task_User_Select_Type'].form

    def testGetForm(self):
        self.assertIsNotNone(self._select_type_form())

    def testGetEnumField(self):
        form = self._select_type_form()
        self.assertEqual("Fact", form.key)
        self.assertEqual(1, len(form.fields))
        field = form.fields[0]
        self.assertEqual("type", field.id)
        self.assertEqual(3, len(field.options))

    def testGetFieldProperties(self):
        properties = self._select_type_form().fields[0].properties
        self.assertEqual(1, len(properties))
        self.assertEqual('description', properties[0].id)
        self.assertEqual('Choose from the list of available types of random facts', properties[0].value)

    def testGetFieldValidation(self):
        validation = self._select_type_form().fields[0].validation
        self.assertEqual(1, len(validation))
        self.assertEqual('maxlength', validation[0].name)
        self.assertEqual('25', validation[0].config)

    def testNoFormDoesNotBombOut(self):
        self.load_workflow_spec('no_form.bpmn', 'no_form')
        self.assertTrue(True)  # You can load a user task that has no form and you can still get here.

    def testCreateTask(self):
        pass
| 1,560 | Python | .py | 28 | 47.928571 | 119 | 0.69908 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
955 | MultiInstanceDMNTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/MultiInstanceDMNTest.py | from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.bpmn.script_engine import PythonScriptEngine, TaskDataEnvironment
from .BaseTestCase import BaseTestCase
class MultiInstanceDMNTest(BaseTestCase):
    """Exercise a DMN business rule task inside a multi-instance activity."""

    def setUp(self):
        self.spec, subprocesses = self.load_workflow_spec(
            'DMNMultiInstance.bpmn', 'Process_1', 'test_integer_decision_multi.dmn')
        self.workflow = BpmnWorkflow(self.spec)
        self.script_engine = PythonScriptEngine(environment=TaskDataEnvironment())
        self.workflow.script_engine = self.script_engine

    def testDmnHappy(self):
        self.workflow.do_engine_steps()
        self.assertEqual(self.workflow.data['stuff']['E']['y'], 'D')

    def testDmnSaveRestore(self):
        # Interleave save/restore cycles with engine steps so the
        # multi-instance DMN state is serialized at each stage.
        for _ in range(2):
            self.save_restore()
            self.workflow.do_engine_steps()
            self.workflow.run_next()
        self.save_restore()
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(self.workflow.data['stuff']['E']['y'], 'D')
| 1,124 | Python | .py | 24 | 38.916667 | 84 | 0.697053 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
956 | ExternalMessageBoundaryEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/ExternalMessageBoundaryEventTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow, BpmnEvent
from SpiffWorkflow.camunda.specs.event_definitions import MessageEventDefinition
from .BaseTestCase import BaseTestCase
__author__ = 'kellym'
class ExternalMessageBoundaryTest(BaseTestCase):
    """Deliver external messages into a process with message boundary events."""

    def setUp(self):
        spec, subprocesses = self.load_workflow_spec('external_message.bpmn', 'Process_1iggtmi')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self, save_restore=False):
        # NOTE(review): `save_restore` is accepted but never used in this body,
        # so testThroughSaveRestore does not actually exercise serialization --
        # TODO confirm and add save_restore() calls at suitable points.
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(1, len(ready_tasks),'Expected to have only one ready task')
        # Deliver the external 'Interrupt' message with a payload.
        self.workflow.catch(BpmnEvent(
            MessageEventDefinition('Interrupt'),
            {'result_var': 'interrupt_var', 'payload': 'SomethingImportant'}
        ))
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(2,len(ready_tasks),'Expected to have two ready tasks')
        # item 1 should be at 'Pause'
        self.assertEqual('Pause',ready_tasks[1].task_spec.bpmn_name)
        # The payload lands in the configured result variable of the catcher.
        self.assertEqual('SomethingImportant', ready_tasks[1].data['interrupt_var'])
        self.assertEqual(True, ready_tasks[1].data['caughtinterrupt'])
        self.assertEqual('Meaningless User Task',ready_tasks[0].task_spec.bpmn_name)
        self.assertEqual(False, ready_tasks[0].data['caughtinterrupt'])
        ready_tasks[1].run()
        self.workflow.do_engine_steps()
        # Deliver the external 'reset' message.
        self.workflow.catch(BpmnEvent(
            MessageEventDefinition('reset'),
            {'result_var': 'reset_var', 'payload': 'SomethingDrastic'}
        ))
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        # The user activity was cancelled and we should continue from the boundary event
        self.assertEqual(2, len(ready_tasks), 'Expected to have two ready tasks')
        event = self.workflow.get_next_task(spec_name='Event_19detfv')
        event.run()
        self.assertEqual('SomethingDrastic', event.data['reset_var'])
        self.assertEqual(False, event.data['caughtinterrupt'])
| 2,355 | Python | .py | 43 | 46.627907 | 96 | 0.701869 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
957 | ParseMultiInstanceTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/ParseMultiInstanceTest.py | from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from .BaseTestCase import BaseTestCase
# NB: I realize this is bad form, but MultiInstanceDMNTest uses a sequential MI task so I'm not adding tests
# for that here. The task specs are updated the same way, so this should be sufficient.
# I'm not testing the specifics of operation here either, because they are pretty extensively
# tested in the main BPMN package
class ParseMultiInstanceTest(BaseTestCase):
    """Verify the parsed IO specs of camunda parallel multi-instance tasks."""

    def testCollectionInCardinality(self):
        """A collection used as cardinality drives both input and output specs."""
        spec, subprocesses = self.load_workflow_spec('parallel_multiinstance_cardinality.bpmn', 'main')
        self.workflow = BpmnWorkflow(spec)
        start = self.workflow.task_tree
        start.data = {'input_data': [1, 2, 3]}
        self.workflow.do_engine_steps()
        self.save_restore()
        task_spec = self.workflow.get_next_task(spec_name='any_task').task_spec
        self.assertEqual(task_spec.data_input.bpmn_id, 'input_data')
        self.assertEqual(task_spec.data_output.bpmn_id, 'output_data')
        # With collection-as-cardinality, both item specs derive from the
        # output item.
        self.assertEqual(task_spec.input_item.bpmn_id, 'output_item')
        self.assertEqual(task_spec.output_item.bpmn_id, 'output_item')
        # One child task per element of the input collection.  (The original
        # test fetched and asserted this twice back to back -- a copy-paste
        # duplication, removed here.)
        ready_tasks = self.get_ready_user_tasks()
        self.assertEqual(len(ready_tasks), 3)
        for task in ready_tasks:
            task.data['output_item'] = task.data['output_item'] * 2
            task.run()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertDictEqual(self.workflow.data, {'input_data': [1, 2, 3], 'output_data': [2, 4, 6]})

    def testIntegerCardinality(self):
        """An integer-expression cardinality leaves data_input unset."""
        spec, subprocesses = self.load_workflow_spec('parallel_multiinstance_cardinality.bpmn', 'main')
        self.workflow = BpmnWorkflow(spec)
        task_spec = self.workflow.get_next_task(spec_name='any_task').task_spec
        task_spec.cardinality = 'len(input_data)'
        start = self.workflow.task_tree
        start.data = {'input_data': [1, 2, 3]}
        self.workflow.do_engine_steps()
        self.save_restore()
        self.assertEqual(task_spec.data_input, None)
        self.assertEqual(task_spec.input_item.bpmn_id, 'output_item')
        ready_tasks = self.get_ready_user_tasks()
        self.assertEqual(len(ready_tasks), 3)
        for task in ready_tasks:
            # With no data input, each instance's item starts as its index.
            task.data['output_item'] = task.data['output_item'] * 2
            task.run()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertDictEqual(self.workflow.data, {'input_data': [1, 2, 3], 'output_data': [0, 2, 4]})

    def testCollection(self):
        """A loop collection without a separate output updates itself in place."""
        spec, subprocesses = self.load_workflow_spec('parallel_multiinstance_collection.bpmn', 'main')
        self.workflow = BpmnWorkflow(spec)
        start = self.workflow.get_next_task(end_at_spec='Start')
        start.data = {'input_data': [1, 2, 3]}
        self.workflow.do_engine_steps()
        self.save_restore()
        task_spec = self.workflow.get_next_task(spec_name='any_task').task_spec
        # Input and output both map to the same collection.
        self.assertEqual(task_spec.data_input.bpmn_id, 'input_data')
        self.assertEqual(task_spec.data_output.bpmn_id, 'input_data')
        self.assertEqual(task_spec.input_item.bpmn_id, 'input_item')
        self.assertEqual(task_spec.output_item.bpmn_id, 'input_item')
        ready_tasks = self.get_ready_user_tasks()
        self.assertEqual(len(ready_tasks), 3)
        for task in ready_tasks:
            task.data['input_item'] = task.data['input_item'] * 2
            task.run()
        self.workflow.do_engine_steps()
        self.assertTrue(self.workflow.completed)
        self.assertDictEqual(self.workflow.data, {'input_data': [2, 4, 6]})
| 3,797 | Python | .py | 68 | 47.147059 | 108 | 0.668106 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
958 | NIMessageBoundaryTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/NIMessageBoundaryTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseTestCase import BaseTestCase
__author__ = 'kellym'
class NIMessageBoundaryTest(BaseTestCase):
    """
    Non-Interrupting Timer boundary test
    """
    def setUp(self):
        spec, subprocesses = self.load_collaboration('noninterrupting-MessageBoundary.bpmn', 'Collaboration_0fh00ao')
        self.workflow = BpmnWorkflow(spec, subprocesses)

    def testRunThroughHappy(self):
        self.actual_test(save_restore=False)

    def testThroughSaveRestore(self):
        self.actual_test(save_restore=True)

    def actual_test(self,save_restore = False):
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(1, len(ready_tasks))
        self.workflow.run_task_from_id(ready_tasks[0].id)
        self.workflow.do_engine_steps()
        # first we run through a couple of steps where we answer No to each
        # question
        answers = {'Activity_WorkLate':('flag_task','No'),
                   'Activity_DoWork': ('work_done','No')}
        for x in range(3):
            ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
            for task in ready_tasks:
                # Any ready task outside the answers map is unexpected.
                response = answers.get(task.task_spec.name,None)
                self.assertEqual(response is None,
                                 False,
                                 'We got a ready task that we did not expect - %s'%(
                                     task.task_spec.name))
                task.data[response[0]] = response[1]
                self.workflow.run_task_from_id(task.id)
            self.workflow.do_engine_steps()
        # if we have a list of tasks - that list becomes invalid
        # after we do a save restore, so I'm completing the list
        # before doing the save restore.
        if save_restore:
            self.save_restore()
        # Now answer Yes to 'work late', which requires giving a reason.
        answers = {'Activity_WorkLate':('flag_task','Yes'),
                   'Activity_DoWork': ('work_done','No'),
                   'Activity_WorkLateReason':('work_late_reason','covid-19')}
        for x in range(3):
            ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
            for task in ready_tasks:
                response = answers.get(task.task_spec.name,None)
                self.assertEqual(response is None,
                                 False,
                                 'We got a ready task that we did not expect - %s'%(
                                     task.task_spec.name))
                task.data[response[0]] = response[1]
                self.workflow.run_task_from_id(task.id)
            self.workflow.do_engine_steps()
        if save_restore:
            self.save_restore()
        # Finish the work and record what was completed.
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(len(ready_tasks),1)
        task = ready_tasks[0]
        self.assertEqual(task.task_spec.name,'Activity_DoWork')
        task.data['work_done'] = 'Yes'
        self.workflow.run_task_from_id(task.id)
        self.workflow.do_engine_steps()
        ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
        self.assertEqual(len(ready_tasks), 1)
        task = ready_tasks[0]
        self.assertEqual(task.task_spec.name, 'Activity_WorkCompleted')
        task.data['work_completed'] = 'Lots of Stuff'
        self.workflow.run_task_from_id(task.id)
        self.workflow.do_engine_steps()
        self.assertEqual(self.workflow.completed,True)
        self.assertEqual(self.workflow.last_task.data,{'Event_InterruptBoundary_Response': 'Youre late!',
                                                       'flag_task': 'Yes',
                                                       'work_done': 'Yes',
                                                       'work_completed': 'Lots of Stuff',
                                                       'work_late_reason': 'covid-19'})
| 3,958 | Python | .py | 76 | 37.171053 | 117 | 0.568769 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
959 | BusinessRuleTaskParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/BusinessRuleTaskParserTest.py | import unittest
from unittest.mock import patch
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseTestCase import BaseTestCase
class BusinessRuleTaskParserTest(BaseTestCase):
def setUp(self):
self.spec, subprocesses = self.load_workflow_spec(
'ExclusiveGatewayIfElseAndDecision.bpmn',
'Process_1',
'test_integer_decision.dmn')
self.workflow = BpmnWorkflow(self.spec)
def testDmnHappy(self):
self.workflow.get_next_task(state=TaskState.READY).set_data(x=3)
self.workflow.do_engine_steps()
self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'})
self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'})
def testDmnSaveRestore(self):
self.workflow.get_next_task(state=TaskState.READY).set_data(x=3)
self.save_restore()
self.workflow.do_engine_steps()
self.save_restore()
self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'})
self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'})
@patch('SpiffWorkflow.dmn.engine.DMNEngine.DMNEngine.evaluate')
def testDmnExecHasAccessToTask(self, mock_engine):
"""At one time, the Execute and Evaluate methods received a Task object
but the DMN evaluate method did not get a task object. While this is
an optional argument, it should always exist if executed in the context
of a BPMNWorkflow"""
self.workflow.get_next_task(state=TaskState.READY).set_data(x=3)
self.workflow.do_engine_steps()
task = self.workflow.get_next_task(spec_name='TaskDecision')
name, args, kwargs = mock_engine.mock_calls[0]
self.assertIn(task, args)
def testDmnUsesSameScriptEngineAsBPMN(self):
self.workflow.get_next_task(state=TaskState.READY).set_data(x=3)
self.workflow.do_engine_steps()
| 1,935 | Python | .py | 38 | 43.263158 | 79 | 0.694386 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
960 | ResetTokenSubWorkflowTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py | import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
__author__ = 'kellym'
class ResetTokenTestSubProcess(BaseTestCase):
"""The example bpmn diagram tests both a set cardinality from user input
as well as looping over an existing array."""
def setUp(self):
spec, subprocesses = self.load_workflow_spec('token_trial_subprocess.bpmn', 'token')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testRunThroughHappy(self):
self.actual_test(save_restore=False)
def testRunThroughSaveRestore(self):
self.actual_test(save_restore=True)
def actual_test(self, save_restore=False):
"""
Test a complicated parallel matrix, complete the matrix and
Reset somewhere in the middle. It should complete the row that we
Reset to, and retain all previous answers.
"""
self.workflow.do_engine_steps()
firsttaskid = None
steps = [{'taskname':'First',
'formvar': 'First',
'answer': 'Yes'},
{'taskname': 'FormA1',
'formvar': 'A1',
'answer': 'xa1'},
{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'xa2'},
{'taskname': 'FormA3',
'formvar': 'A3',
'answer': 'xa3'},
]
for step in steps:
task = self.get_ready_user_tasks()[0]
if firsttaskid is None and step['taskname']=='FormA1':
firsttaskid = task.id
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
task.run()
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.workflow.reset_from_task_id(firsttaskid)
#NB - this won't test random access
steps = [{'taskname': 'FormA1',
'formvar': 'A1',
'answer': 'a1'},
{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'a2'},
{'taskname': 'FormA3',
'formvar': 'A3',
'answer': 'a3'},
{'taskname': 'FormD',
'formvar': 'D',
'answer': 'd'},
]
for step in steps:
task = self.get_ready_user_tasks()[0]
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
task.run()
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.assertTrue(self.workflow.completed)
self.assertEqual({'First': 'Yes',
'A1': 'a1',
'A2': 'a2',
'A3': 'a3',
'D': 'd'},
self.workflow.last_task.data)
def actual_test2(self, save_restore=False,reset_data=False):
"""
Test a complicated parallel matrix,
Complete several items in the parallel matrix, but do not complete it,
Reset to a previous version on another branch of the parallel, it should
complete that branch and then pick up where we left off.
Also, after we reset the branch, there should then be three tasks ready,
A2,B3,and C1
"""
self.workflow.do_engine_steps()
firsttaskid = None
steps = [{'taskname':'First',
'formvar': 'First',
'answer': 'Yes'},
{'taskname': 'FormA1',
'formvar': 'A1',
'answer': 'xa1'},
{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'xa2'},
{'taskname': 'FormA3',
'formvar': 'A3',
'answer': 'xa3'},
{'taskname': 'FormB1',
'formvar': 'B1',
'answer': 'xb1'},
{'taskname': 'FormB2',
'formvar': 'B2',
'answer': 'xb2'},
]
for step in steps:
task = self.get_ready_user_tasks()[0]
if firsttaskid is None and step['taskname']=='FormA2':
firsttaskid = task.id
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.workflow.reset_from_task_id(firsttaskid)
#NB - this won't test random access
steps = [{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'a2'},
{'taskname': 'FormA3',
'formvar': 'A3',
'answer': 'a3'},
{'taskname': 'FormB3',
'formvar': 'B3',
'answer': 'b3'},
{'taskname': 'FormC1',
'formvar': 'C1',
'answer': 'c1'},
{'taskname': 'FormC2',
'formvar': 'C2',
'answer': 'c2'},
{'taskname': 'FormC3',
'formvar': 'C3',
'answer': 'c3'},
{'taskname': 'FormD',
'formvar': 'D',
'answer': 'd'},
]
readytasks = [t.task_spec.name for t in self.get_ready_user_tasks()]
self.assertEqual(readytasks,['FormA2','FormB3','FormC1'])
for step in steps:
task = self.get_ready_user_tasks()[0]
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.assertTrue(self.workflow.completed)
self.assertEqual({'First': 'Yes',
'A1': 'xa1',
'A2': 'a2',
'A3': 'a3',
'B1': 'xb1',
'B2': 'xb2',
'B3': 'b3',
'C1': 'c1',
'C2': 'c2',
'C3': 'c3',
'D': 'd'},
self.workflow.last_task.data)
| 6,735 | Python | .py | 162 | 26.364198 | 92 | 0.46343 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
961 | InvalidBusinessRuleTaskParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/InvalidBusinessRuleTaskParserTest.py | import unittest
from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from SpiffWorkflow.exceptions import SpiffWorkflowException, WorkflowException
from .BaseTestCase import BaseTestCase
class BusinessRuleTaskParserTest(BaseTestCase):
def setUp(self):
self.spec, subproceses = self.load_workflow_spec(
'invalid/InvalidDecision.bpmn', 'Process_1', 'invalid_decision.dmn')
self.workflow = BpmnWorkflow(self.spec)
def testExceptionPrint(self):
e1 = Exception("test 1")
e = SpiffWorkflowException("test")
def testDmnRaisesTaskErrors(self):
self.workflow = BpmnWorkflow(self.spec)
self.workflow.get_next_task(state=TaskState.READY).set_data(x=3)
try:
self.workflow.do_engine_steps()
self.assertTrue(False, "An error should have been raised.")
except WorkflowException as we:
self.assertTrue(True, "An error was raised..")
self.assertEqual("InvalidDecisionTaskId", we.task_spec.name)
self.maxDiff = 1000
self.assertEqual("Error evaluating expression 'spam= 1'. Rule failed on row 1. Business Rule Task 'Invalid Decision'.", str(we))
| 1,221 | Python | .py | 24 | 42.916667 | 140 | 0.711765 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
962 | CamundaParserTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/CamundaParserTest.py | from SpiffWorkflow.bpmn.parser.util import full_tag
from SpiffWorkflow.camunda.specs import UserTask, BusinessRuleTask
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.camunda.parser.task_spec import UserTaskParser, BusinessRuleTaskParser
from .BaseTestCase import BaseTestCase
class CamundaParserTest(BaseTestCase):
def setUp(self):
self.parser = CamundaParser()
def test_overrides(self):
overrides = [
('userTask', UserTaskParser, UserTask),
('businessRuleTask', BusinessRuleTaskParser, BusinessRuleTask),
]
for key, parser, spec in overrides:
self.assertIn(full_tag(key), self.parser.OVERRIDE_PARSER_CLASSES)
self.assertEqual((parser, spec), self.parser.OVERRIDE_PARSER_CLASSES.get(full_tag(key)))
| 835 | Python | .py | 16 | 45.25 | 100 | 0.751232 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
963 | SubWorkflowTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/SubWorkflowTest.py | import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
__author__ = 'kellym'
class SubWorkflowTest(BaseTestCase):
"""The tests a somewhat complex subworkflow and verifies that it does
what we expect"""
def setUp(self):
spec, subprocesses = self.load_workflow_spec('subWorkflowComplex.bpmn', 'SubWorkflow')
self.workflow = BpmnWorkflow(spec, subprocesses)
self.workflow.do_engine_steps()
self.answers = ['A','A1','A2','B']
def testRunThroughHappy(self):
self.actual_test(False)
def testRunThroughSaveRestore(self):
self.actual_test(True)
def actual_test(self, save_restore=False):
# Set initial array size to 3 in the first user form.
for answer in self.answers:
task = self.get_ready_user_tasks()[0]
self.assertEqual("Activity_"+answer, task.task_spec.name)
task.set_data(**{"Field"+answer: answer})
task.run()
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.assertEqual(self.workflow.last_task.data,{'FieldA': 'A',
'FieldA1': 'A1',
'FieldA2': 'A2',
'FieldB': 'B'})
self.assertTrue(self.workflow.completed)
| 1,486 | Python | .py | 31 | 34.903226 | 94 | 0.588235 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
964 | ResetTokenNestedParallelTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py | import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
__author__ = 'kellym'
class ResetTokenTestNestedParallel(BaseTestCase):
"""The example bpmn diagram tests both a set cardinality from user input
as well as looping over an existing array."""
def setUp(self):
spec, subprocesses = self.load_workflow_spec('token_trial_nested_parallel.bpmn', 'token')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testRunThroughHappy(self):
self.actual_test(save_restore=False)
def testRunThroughSaveRestore(self):
self.actual_test(save_restore=True)
def testRunThroughHappyAlt(self):
self.actual_test2(save_restore=False)
def testRunThroughSaveRestoreAlt(self):
self.actual_test2(save_restore=True)
def actual_test(self, save_restore=False,reset_data=False):
"""
Test a complicated parallel matrix, complete the matrix and
Reset somewhere in the middle. It should complete the row that we
Reset to, and retain all previous answers.
"""
self.workflow.do_engine_steps()
firsttaskid = None
steps = [{'taskname':'First',
'formvar': 'First',
'answer': 'Yes'},
{'taskname': 'FormA1',
'formvar': 'A1',
'answer': 'xa1'},
{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'xa2'},
{'taskname': 'FormA3',
'formvar': 'A3',
'answer': 'xa3'},
{'taskname': 'FormB1',
'formvar': 'B1',
'answer': 'xb1'},
{'taskname': 'FormB2',
'formvar': 'B2',
'answer': 'xb2'},
{'taskname': 'FormB3',
'formvar': 'B3',
'answer': 'xb3'},
{'taskname': 'FormC1',
'formvar': 'C1',
'answer': 'xc1'},
{'taskname': 'FormC2',
'formvar': 'C2',
'answer': 'xc2'},
{'taskname': 'FormC3',
'formvar': 'C3',
'answer': 'xc3'},
]
for step in steps:
task = self.get_ready_user_tasks()[0]
if firsttaskid is None and step['taskname']=='FormB2':
firsttaskid = task.id
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.workflow.reset_from_task_id(firsttaskid)
self.workflow.do_engine_steps()
#NB - this won't test random access
steps = [{'taskname': 'FormB2',
'formvar': 'B2',
'answer': 'b2'},
{'taskname': 'FormD',
'formvar': 'D',
'answer': 'd'},
]
for step in steps:
task = self.get_ready_user_tasks()[0]
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.get_ready_user_tasks()
self.assertTrue(self.workflow.completed)
self.assertEqual({'First': 'Yes',
'A1': 'xa1',
'A2': 'xa2',
'A3': 'xa3',
'B1': 'xb1',
'B2': 'b2',
'B3': 'xb3',
'C1': 'xc1',
'C2': 'xc2',
'C3': 'xc3',
'D': 'd'},
self.workflow.last_task.data)
def actual_test2(self, save_restore=False,reset_data=False):
"""
Test a complicated parallel matrix,
Complete several items in the parallel matrix, but do not complete it,
Reset to a previous version on another branch of the parallel, it should
complete that branch and then pick up where we left off.
Also, after we reset the branch, there should then be three tasks ready,
A2,B3,and C1
"""
self.workflow.do_engine_steps()
firsttaskid = None
steps = [{'taskname':'First',
'formvar': 'First',
'answer': 'Yes'},
{'taskname': 'FormA1',
'formvar': 'A1',
'answer': 'xa1'},
{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'xa2'},
{'taskname': 'FormA3',
'formvar': 'A3',
'answer': 'xa3'},
{'taskname': 'FormB1',
'formvar': 'B1',
'answer': 'xb1'},
{'taskname': 'FormB2',
'formvar': 'B2',
'answer': 'xb2'},
]
for step in steps:
task = self.get_ready_user_tasks()[0]
if firsttaskid is None and step['taskname']=='FormA2':
firsttaskid = task.id
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.workflow.reset_from_task_id(firsttaskid)
#NB - this won't test random access
steps = [{'taskname': 'FormA2',
'formvar': 'A2',
'answer': 'a2'},
{'taskname': 'FormB3',
'formvar': 'B3',
'answer': 'b3'},
{'taskname': 'FormC1',
'formvar': 'C1',
'answer': 'c1'},
{'taskname': 'FormC2',
'formvar': 'C2',
'answer': 'c2'},
{'taskname': 'FormC3',
'formvar': 'C3',
'answer': 'c3'},
{'taskname': 'FormD',
'formvar': 'D',
'answer': 'd'},
]
readytasks = [t.task_spec.name for t in self.get_ready_user_tasks()]
self.assertEqual(readytasks,['FormA2','FormB3','FormC1','FormC2','FormC3'])
for step in steps:
task = self.get_ready_user_tasks()[0]
self.assertEqual(step['taskname'], task.task_spec.name)
task.set_data(**{step['formvar']: step['answer']})
self.workflow.run_task_from_id(task.id)
self.workflow.do_engine_steps()
if save_restore:
self.save_restore()
self.assertTrue(self.workflow.completed)
self.assertEqual({'First': 'Yes',
'A1': 'xa1',
'A2': 'a2',
'A3': 'xa3',
'B1': 'xb1',
'B2': 'xb2',
'B3': 'b3',
'C1': 'c1',
'C2': 'c2',
'C3': 'c3',
'D': 'd'},
self.workflow.last_task.data)
| 7,656 | Python | .py | 183 | 26.333333 | 97 | 0.458999 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
965 | StartMessageEventTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/StartMessageEventTest.py | from SpiffWorkflow import TaskState
from SpiffWorkflow.bpmn import BpmnWorkflow
from .BaseTestCase import BaseTestCase
__author__ = 'kellym'
class StartMessageTest(BaseTestCase):
def setUp(self):
self.spec, self.subprocesses = self.load_collaboration('message_test.bpmn', 'Collaboration_0n93bdm')
self.workflow = BpmnWorkflow(self.spec, self.subprocesses)
def testParserCanReturnStartMessages(self):
parser = self.get_parser('message_test.bpmn')
self.assertEqual(parser.process_parsers['ThrowCatch'].start_messages(), ['ApprovalRequest'])
parser = self.get_parser('random_fact.bpmn')
self.assertEqual(parser.process_parsers['random_fact'].start_messages(), [])
def testRunThroughHappy(self):
self.actual_test(save_restore=False)
def testThroughSaveRestore(self):
self.actual_test(save_restore=True)
def actual_test(self,save_restore = False):
steps = [('Activity_EnterPlan',{'plan_details':'Bad'}),
('Activity_ApproveOrDeny', {'approved':'No'}),
('Activity_EnterPlan', {'plan_details':'Better'}),
('Activity_ApproveOrDeny', {'approved':'No'}),
('Activity_EnterPlan', {'plan_details':'Best'}),
('Activity_ApproveOrDeny', {'approved':'Yes'}),
('Activity_EnablePlan',{'Done':'OK!'})]
self.workflow.do_engine_steps() # get around start task
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
waiting_tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(1, len(ready_tasks),'Expected to have one ready task')
self.assertEqual(1, len(waiting_tasks), 'Expected to have one waiting task')
for step in steps:
current_task = ready_tasks[0]
self.assertEqual(current_task.task_spec.name,step[0])
current_task.set_data(**step[1])
current_task.run()
self.workflow.do_engine_steps()
self.workflow.refresh_waiting_tasks()
if save_restore:
self.save_restore()
ready_tasks = self.workflow.get_tasks(state=TaskState.READY)
self.assertEqual(self.workflow.completed,True,'Expected the workflow to be complete at this point')
self.assertEqual(self.workflow.last_task.data,
{
'plan_details': 'Best',
'ApprovalResult': 'Yes',
'Done': 'OK!'
})
| 2,503 | Python | .py | 47 | 42.574468 | 108 | 0.63815 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
966 | UserTaskSpecTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py | import unittest
from SpiffWorkflow.camunda.specs.user_task import FormField, UserTask, Form, EnumFormField
from SpiffWorkflow.camunda.serializer.task_spec import UserTaskConverter
from SpiffWorkflow.bpmn.serializer.helpers.dictionary import DictionaryConverter
from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
class UserTaskSpecTest(unittest.TestCase):
def create_instance(self):
if 'testtask' in self.wf_spec.task_specs:
del self.wf_spec.task_specs['testtask']
self.form = Form()
return UserTask(self.wf_spec, 'userTask', self.form)
def setUp(self):
self.wf_spec = WorkflowSpec()
self.user_spec = self.create_instance()
def testConstructor(self):
self.assertEqual(self.user_spec.name, 'userTask')
self.assertEqual(self.user_spec.data, {})
self.assertEqual(self.user_spec.defines, {})
self.assertEqual(self.user_spec.pre_assign, [])
self.assertEqual(self.user_spec.post_assign, [])
def test_set_form(self):
self.assertEqual(self.form, self.user_spec.form)
def testSerialize(self):
field1 = FormField(form_type="text")
field1.id = "quest"
field1.label = "What is your quest?"
field1.default_value = "I seek the grail!"
field2 = EnumFormField()
field2.id = "color"
field2.label = "What is your favorite color?"
field2.add_option("red", "Red")
field2.add_option("orange", "Green")
field2.add_option("yellow", "Yellow")
field2.add_option("green", "Green")
field2.add_option("blue", "Blue")
field2.add_option("indigo", "Indigo")
field2.add_option("violet", "Violet")
field2.add_option("other", "Other")
field2.add_property("description", "You know what to do.")
field2.add_validation("maxlength", "25")
self.form.key = "formKey"
self.form.add_field(field1)
self.form.add_field(field2)
converter = UserTaskConverter(UserTask, DictionaryConverter())
dct = converter.to_dict(self.user_spec)
self.assertEqual(dct['name'], 'userTask')
self.assertEqual(dct['form'], {
"fields": [
{
"default_value": "I seek the grail!",
"label": "What is your quest?",
"id": "quest",
"properties": [],
"type": "text",
"validation": [],
},
{
"default_value": "",
"id": "color",
"label": "What is your favorite color?",
"options": [
{"id": "red", "name": "Red"},
{"id": "orange", "name": "Green"},
{"id": "yellow", "name": "Yellow"},
{"id": "green", "name": "Green"},
{"id": "blue", "name": "Blue"},
{"id": "indigo", "name": "Indigo"},
{"id": "violet", "name": "Violet"},
{"id": "other", "name": "Other"},
],
"properties": [
{"id": "description", "value": "You know what to do."},
],
"type": "enum",
"validation": [
{"name": "maxlength", "config": "25"},
],
}
],
"key": "formKey",
})
def test_text_field(self):
form_field = FormField(form_type="text")
form_field.id = "1234"
self.form.add_field(form_field)
self.assertEqual(form_field, self.user_spec.form.fields[0])
def test_enum_field(self):
enum_field = EnumFormField()
enum_field.label = "Which kind of fool are you"
enum_field.add_option('old fool', 'This is old, therefor it is good.')
enum_field.add_option('new fool',
'This is new, therefor it is better.')
self.form.add_field(enum_field)
self.assertEqual(enum_field, self.user_spec.form.fields[-1])
def test_properties(self):
form_field = FormField(form_type="text")
self.assertFalse(form_field.has_property("wilma"))
form_field.add_property("wilma", "flintstone")
self.assertTrue(form_field.has_property("wilma"))
self.assertEqual("flintstone", form_field.get_property("wilma"))
def test_validations(self):
form_field = FormField(form_type="text")
self.assertFalse(form_field.has_validation("barney"))
form_field.add_validation("barney", "rubble")
self.assertTrue(form_field.has_validation("barney"))
self.assertEqual("rubble", form_field.get_validation("barney"))
def testIsEngineTask(self):
self.assertTrue(self.user_spec.manual)
| 4,935 | Python | .py | 108 | 33.453704 | 90 | 0.553939 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
967 | CamundaExtensionsTest.py | sartography_SpiffWorkflow/tests/SpiffWorkflow/camunda/serializer/CamundaExtensionsTest.py | import unittest
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase
class CamundaExtensionsTest(BaseTestCase):
def setUp(self):
spec, subprocesses = self.load_workflow_spec('random_fact.bpmn', 'random_fact')
self.workflow = BpmnWorkflow(spec, subprocesses)
def testExtensionsAreSerialized(self):
self.assertMyExtension()
self.save_restore()
self.assertMyExtension()
def assertMyExtension(self):
"""Assure that we have a very specific extension on specific task."""
task = self.workflow.spec.get_task_spec_from_name("Task_User_Select_Type")
self.assertIsNotNone(task)
self.assertTrue(hasattr(task, 'extensions'))
self.assertTrue("my_extension" in task.extensions)
self.assertEqual(task.extensions["my_extension"], 'my very own extension')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(CamundaExtensionsTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| 1,088 | Python | .py | 22 | 43.045455 | 87 | 0.734597 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
968 | conf.py | sartography_SpiffWorkflow/doc/conf.py | # Configuration file for the Sphinx documentation builder.
#
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = 'SpiffWorkflow'
copyright = '2024, Sartography'
author = 'Sartography'

# The full version, including alpha/beta/rc tags
release = '3.0.0'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.viewcode',
              'sphinx.ext.autosummary',
              'sphinx.ext.extlinks',
              'sphinx_rtd_theme',
              ]

# Configure links to example repo.
# Each extlink maps a role (e.g. :bpmn:`file`) to a URL template; the
# trailing '%s' is kept out of the f-string so it survives as the extlinks
# substitution placeholder rather than being interpolated here.
branch = 'main'
extlinks = {
    'example': (f'https://github.com/sartography/spiff-example-cli/tree/{branch}/' + '%s', '%s'),
    'bpmn': (f'https://github.com/sartography/spiff-example-cli/tree/{branch}/bpmn/tutorial/' + '%s', '%s'),
    'form': (f'https://github.com/sartography/spiff-example-cli/tree/{branch}/bpmn/tutorial/forms/' + '%s', '%s'),
    'app': (f'https://github.com/sartography/spiff-example-cli/tree/{branch}/spiff_example/' + '%s', '%s'),
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# Set the master index file.
master_doc = 'index'

# Set the fav-icon
html_favicon = 'favicon.ico'
969 | serializer.py | sartography_SpiffWorkflow/doc/core/custom-tasks/serializer.py | from SpiffWorkflow.serializer.json import JSONSerializer
from strike import NuclearStrike
class NuclearSerializer(JSONSerializer):
def serialize_nuclear_strike(self, task_spec):
return self.serialize_task_spec(task_spec)
def deserialize_nuclear_strike(self, wf_spec, s_state):
spec = NuclearStrike(wf_spec, s_state['name'])
self.deserialize_task_spec(wf_spec, s_state, spec=spec)
return spec
| 434 | Python | .py | 9 | 42.555556 | 63 | 0.749409 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
970 | strike.py | sartography_SpiffWorkflow/doc/core/custom-tasks/strike.py | from SpiffWorkflow.specs.Simple import Simple
class NuclearStrike(Simple):
def _on_complete_hook(self, my_task):
print((self.my_variable, "sent!"))
def serialize(self, serializer):
return serializer.serialize_nuclear_strike(self)
@classmethod
def deserialize(self, serializer, wf_spec, s_state):
return serializer.deserialize_nuclear_strike(wf_spec, s_state)
| 402 | Python | .py | 9 | 38.888889 | 70 | 0.730769 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
971 | start.py | sartography_SpiffWorkflow/doc/core/custom-tasks/start.py | from SpiffWorkflow.workflow import Workflow
from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from serializer import NuclearSerializer
# Load from JSON
with open('nuclear.json') as fp:
workflow_json = fp.read()
nuclear_serializer = NuclearSerializer()
spec = WorkflowSpec.deserialize(nuclear_serializer, workflow_json)
# Create the workflow.
workflow = Workflow(spec)
# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
workflow.run_all(halt_on_manual=False)
| 615 | Python | .py | 14 | 42.428571 | 67 | 0.811037 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
972 | deserialize-wf.py | sartography_SpiffWorkflow/doc/core/tutorial/deserialize-wf.py | from SpiffWorkflow.workflow import Workflow
from SpiffWorkflow.serializer.json import JSONSerializer
serializer = JSONSerializer()
with open('workflow.json') as fp:
workflow_json = fp.read()
workflow = Workflow.deserialize(serializer, workflow_json)
| 255 | Python | .py | 6 | 40.666667 | 58 | 0.826613 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
973 | nuclear.py | sartography_SpiffWorkflow/doc/core/tutorial/nuclear.py | from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from SpiffWorkflow.specs.ExclusiveChoice import ExclusiveChoice
from SpiffWorkflow.specs.Simple import Simple
from SpiffWorkflow.specs.Cancel import Cancel
from SpiffWorkflow.operators import Equal, Attrib
def my_nuclear_strike(msg):
print("Launched:", msg)
class NuclearStrikeWorkflowSpec(WorkflowSpec):
def __init__(self):
WorkflowSpec.__init__(self)
# The first step of our workflow is to let the general confirm
# the nuclear strike.
general_choice = ExclusiveChoice(self, 'general')
self.start.connect(general_choice)
# The default choice of the general is to abort.
cancel = Cancel(self, 'workflow_aborted')
general_choice.connect(cancel)
# Otherwise, we will ask the president to confirm.
president_choice = ExclusiveChoice(self, 'president')
cond = Equal(Attrib('confirmation'), 'yes')
general_choice.connect_if(cond, president_choice)
# The default choice of the president is to abort.
president_choice.connect(cancel)
# Otherwise, we will perform the nuclear strike.
strike = Simple(self, 'nuclear_strike')
president_choice.connect_if(cond, strike)
# Now we connect our Python function to the Task named 'nuclear_strike'
strike.completed_event.connect(my_nuclear_strike)
# As soon as all tasks are either "completed" or "aborted", the
# workflow implicitely ends.
| 1,518 | Python | .py | 30 | 43.433333 | 79 | 0.715348 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
974 | start.py | sartography_SpiffWorkflow/doc/core/tutorial/start.py | from SpiffWorkflow.workflow import Workflow
from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from SpiffWorkflow.serializer.json import JSONSerializer
# Load from JSON
with open('nuclear.json') as fp:
workflow_json = fp.read()
serializer = JSONSerializer()
spec = WorkflowSpec.deserialize(serializer, workflow_json)
# Alternatively, create an instance of the Python based specification.
#from nuclear import NuclearStrikeWorkflowSpec
#spec = NuclearStrikeWorkflowSpec()
# Create the workflow.
workflow = Workflow(spec)
# Execute until all tasks are done or require manual intervention.
# For the sake of this tutorial, we ignore the "manual" flag on the
# tasks. In practice, you probably don't want to do that.
workflow.run_all(halt_on_manual=False)
# Alternatively, this is what a UI would do for a manual task.
#workflow.complete_task_from_id(...)
| 868 | Python | .py | 19 | 44.210526 | 70 | 0.810427 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
975 | deserialize.py | sartography_SpiffWorkflow/doc/core/tutorial/deserialize.py | from SpiffWorkflow.specs.WorkflowSpec import WorkflowSpec
from SpiffWorkflow.serializer.json import JSONSerializer
serializer = JSONSerializer()
with open('workflow-spec.json') as fp:
workflow_json = fp.read()
spec = WorkflowSpec.deserialize(serializer, workflow_json)
| 274 | Python | .py | 6 | 43.833333 | 58 | 0.831461 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
import json

from SpiffWorkflow.workflow import Workflow
from SpiffWorkflow.serializer.json import JSONSerializer
from nuclear import NuclearStrikeWorkflowSpec

serializer = JSONSerializer()
spec = NuclearStrikeWorkflowSpec()
workflow = Workflow(spec)
data = workflow.serialize(serializer)

# This next line is unnecessary in practice; it just makes the JSON pretty.
pretty = json.dumps(json.loads(data), indent=4, separators=(',', ': '))

# Use a context manager so the file handle is closed deterministically.
with open('workflow.json', 'w') as fp:
    fp.write(pretty)
| 480 | Python | .py | 11 | 42.363636 | 75 | 0.811159 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
import json

from SpiffWorkflow.serializer.json import JSONSerializer
from nuclear import NuclearStrikeWorkflowSpec

serializer = JSONSerializer()
spec = NuclearStrikeWorkflowSpec()
data = spec.serialize(serializer)

# This next line is unnecessary in practice; it just makes the JSON pretty.
pretty = json.dumps(json.loads(data), indent=4, separators=(',', ': '))

# Use a context manager so the file handle is closed deterministically.
with open('workflow-spec.json', 'w') as fp:
    fp.write(pretty)
| 411 | Python | .py | 9 | 44.333333 | 75 | 0.799499 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
# Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import logging
from .task import Task
from .util.task import TaskState, TaskIterator, TaskFilter
from .util.compat import mutex
from .util.event import Event
from .exceptions import TaskNotFoundException, WorkflowException
logger = logging.getLogger('spiff.workflow')
class Workflow(object):
    """The instantiation of a `WorkflowSpec`.

    Represents the state of a running workflow and its data.

    Attributes:
        spec (`WorkflowSpec`): the spec that describes this workflow instance
        data (dict): the data associated with the workflow
        locks (dict): a dictionary holding locks used by mutex tasks
        last_task (`Task`): the last successfully completed task
        success (bool): whether the workflow was successful
        tasks (dict(id, `Task`)): a mapping of task ids to tasks
        task_tree (`Task`): the root task of this workflow's task tree
        completed_event (`Event`): an event holding callbacks to be run when the workflow completes
    """

    def __init__(self, workflow_spec, deserializing=False):
        """
        Parameters:
            workflow_spec (`WorkflowSpec`): the spec that describes this workflow
            deserializing (bool): whether this workflow is being deserialized
        """
        self.spec = workflow_spec
        self.data = {}
        self.locks = {}
        self.last_task = None
        self.success = True
        self.tasks = {}
        self.completed = False
        # Events.
        self.completed_event = Event()
        if not deserializing:
            self.task_tree = Task(self, self.spec.start, state=TaskState.FUTURE)
            # Predict the reachable portion of the tree before activating the root.
            self.task_tree.task_spec._predict(self.task_tree, mask=TaskState.NOT_FINISHED_MASK)
            logger.info('Initialized workflow', extra=self.collect_log_extras())
            self.task_tree._ready()

    def is_completed(self):
        """Checks whether the workflow is complete.

        Returns:
            bool: True if the workflow has no unfinished tasks
        """
        if not self.completed:
            # Complete when no unfinished task exists; cache the result so the
            # tree is never scanned again once the workflow has finished.
            task_iter = TaskIterator(self.task_tree, state=TaskState.NOT_FINISHED_MASK)
            try:
                next(task_iter)
            except StopIteration:
                self.completed = True
        return self.completed

    def manual_input_required(self):
        """Checks whether the workflow requires manual input.

        Returns:
            bool: True if no READY task can be run automatically (note that this
            is also True when there are no READY tasks at all)
        """
        task_iter = TaskIterator(self.task_tree, state=TaskState.READY, manual=False)
        try:
            next(task_iter)
        except StopIteration:
            return True
        return False

    def get_tasks(self, first_task=None, **kwargs):
        """Returns a list of `Task`s that meet the conditions specified `kwargs`, starting from the root by default.

        Notes:
            Keyword args are passed directly to `get_tasks_iterator`

        Returns:
            list(`Task`): the tasks that match the filtering conditions
        """
        return list(self.get_tasks_iterator(first_task, **kwargs))

    def get_next_task(self, first_task=None, **kwargs):
        """Returns the next task that meets the iteration conditions, starting from the root by default.

        Parameters:
            first_task (`Task`): search beginning from this task

        Notes:
            Other keyword args are passed directly into `get_tasks_iterator`

        Returns:
            `Task` or None: the first task that meets the conditions or None if no tasks match
        """
        task_iter = self.get_tasks_iterator(first_task, **kwargs)
        try:
            return next(task_iter)
        except StopIteration:
            return None

    def get_tasks_iterator(self, first_task=None, **kwargs):
        """Returns an iterator of Tasks that meet the conditions specified `kwargs`, starting from the root by default.

        Parameters:
            first_task (`Task`): search beginning from this task

        Notes:
            Other keyword args are passed directly into `TaskIterator`

        Returns:
            `TaskIterator`: an iterator over the matching tasks
        """
        return TaskIterator(first_task or self.task_tree, **kwargs)

    def get_task_from_id(self, task_id):
        """Returns the task with the given id.

        Args:
            task_id: the id of the task to run

        Returns:
            `Task`: the task

        Raises:
            `TaskNotFoundException`: if the task does not exist
        """
        if task_id not in self.tasks:
            raise TaskNotFoundException(f'A task with id {task_id} was not found', task_spec=self.spec)
        return self.tasks.get(task_id)

    def run_task_from_id(self, task_id):
        """Runs the task with the given id.

        Args:
            task_id: the id of the task to run
        """
        task = self.get_task_from_id(task_id)
        return task.run()

    def run_next(self, use_last_task=True, halt_on_manual=True):
        """Runs the next task, starting from the branch containing the last completed task by default.

        Parameters:
            use_last_task (bool): start with the currently running branch
            halt_on_manual (bool): do not run tasks with `TaskSpec`s that have the `manual` attribute set

        Returns:
            bool: True when a task runs successfully
        """
        first_task = self.last_task if use_last_task and self.last_task is not None else self.task_tree
        task_filter = TaskFilter(
            state=TaskState.READY,
            manual=False if halt_on_manual else None,
        )
        task = self.get_next_task(first_task, task_filter=task_filter)
        # If we didn't execute anything on the current branch, retry from the root task
        if task is None and use_last_task:
            task = self.get_next_task(self.task_tree, task_filter=task_filter)

        if task is None:
            # If no task was found, update any waiting tasks. Ideally, we wouldn't do this, but currently necessary.
            self.update_waiting_tasks()
            # Returning None (falsy) tells run_all to stop.
        else:
            return task.run()

    def run_all(self, use_last_task=True, halt_on_manual=True):
        """Runs all possible tasks, starting from the current branch by default.

        Parameters:
            use_last_task (bool): start with the currently running branch
            halt_on_manual (bool): do not run tasks with `TaskSpec`s that have the `manual` attribute set
        """
        while self.run_next(use_last_task, halt_on_manual):
            pass

    def update_waiting_tasks(self):
        """Update all tasks in the WAITING state"""
        for task in TaskIterator(self.task_tree, state=TaskState.WAITING):
            task.task_spec._update(task)

    def cancel(self, success=False):
        """Cancels all open tasks in the workflow.

        Args:
            success (bool): the state of the workflow

        Returns:
            list(`Task`): the cancelled tasks
        """
        self.success = success
        self.completed = True
        logger.info('Workflow cancelled', extra=self.collect_log_extras())
        # Collect first, then cancel, so the iterator is not invalidated
        # while tasks are removed from the tree.
        cancelled = []
        for task in TaskIterator(self.task_tree, state=TaskState.NOT_FINISHED_MASK):
            cancelled.append(task)
        for task in cancelled:
            task.cancel()
        return cancelled

    def set_data(self, **kwargs):
        """Defines the given attribute/value pairs."""
        self.data.update(kwargs)

    def get_data(self, name, default=None):
        """Returns the value of the data field with the given name, or the given
        default value if the data field does not exist.

        Args:
            name (str): the dictionary key to return
            default (obj): a default value to return if the key does not exist

        Returns:
            the value of the key, or the default
        """
        return self.data.get(name, default)

    def reset_from_task_id(self, task_id, data=None):
        """Removes all descendants of this task and makes this task runnable again.

        Args:
            task_id: the id of the task to reset to
            data (dict): optionally replace the data (if None, data will be copied from the parent task)

        Returns:
            list(`Task`): tasks removed from the tree
        """
        task = self.get_task_from_id(task_id)
        self.last_task = task.parent
        return task.reset_branch(data)

    def collect_log_extras(self, dct=None):
        """Return logging details for this workflow"""
        extra = dct or {}
        extra.update({
            'workflow_spec': self.spec.name,
            'success': self.success,
            'completed': self.completed,
        })
        # Only include per-task detail at DEBUG verbosity (level < INFO == 20).
        if logger.level < 20:
            extra.update({'tasks': [t.id for t in Workflow.get_tasks(self)]})
        return extra

    def _predict(self, mask=TaskState.NOT_FINISHED_MASK):
        """Predict tasks with the provided mask"""
        # Calling through the class deliberately bypasses subclass overrides
        # of get_tasks -- presumably to guarantee base iteration semantics.
        for task in Workflow.get_tasks(self, state=TaskState.NOT_FINISHED_MASK):
            task.task_spec._predict(task, mask=mask)

    def _task_completed_notify(self, task):
        """Called whenever a task completes"""
        self.last_task = task
        if task.task_spec.name == 'End':
            self._mark_complete(task)
        if self.completed:
            self.completed_event(self)
        else:
            self.update_waiting_tasks()

    def _remove_task(self, task_id):
        # Depth-first removal: drop the whole subtree, then unlink from the parent.
        task = self.tasks[task_id]
        for child in task.children:
            self._remove_task(child.id)
        task.parent._children.remove(task.id)
        self.tasks.pop(task_id)

    def _mark_complete(self, task):
        logger.info('Workflow completed', extra=self.collect_log_extras())
        # The completing task's data becomes the workflow's final data.
        self.data.update(task.data)
        self.completed = True

    def _get_mutex(self, name):
        """Get or create a mutex"""
        if name not in self.locks:
            self.locks[name] = mutex()
        return self.locks[name]

    def get_task_mapping(self):
        """Returns a mapping of thread ids to the tasks in each thread.

        This can be used to identify tasks by branch and use that information
        for decision making (despite the flawed implementation mechanism; IMO,
        this should be maintained by the workflow rather than a class attribute).
        """
        task_mapping = {}
        for task in self.task_tree:
            thread_task_mapping = task_mapping.get(task.thread_id, {})
            tasks = thread_task_mapping.get(task.task_spec, set())
            tasks.add(task)
            thread_task_mapping[task.task_spec] = tasks
            task_mapping[task.thread_id] = thread_task_mapping
        return task_mapping

    def get_dump(self):
        """Returns a string representation of the task tree.

        Returns:
            str: a tree view of the current workflow state
        """
        return self.task_tree.get_dump()

    def dump(self):
        """Print a dump of the current task tree"""
        print(self.task_tree.dump())

    def serialize(self, serializer, **kwargs):
        """
        Serializes a Workflow instance using the provided serializer.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :type kwargs: dict
        :param kwargs: Passed to the serializer.
        :rtype: object
        :returns: The serialized workflow.
        """
        return serializer.serialize_workflow(self, **kwargs)

    @classmethod
    def deserialize(cls, serializer, s_state, **kwargs):
        """
        Deserializes a Workflow instance using the provided serializer.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :type s_state: object
        :param s_state: The serialized workflow.
        :type kwargs: dict
        :param kwargs: Passed to the serializer.
        :rtype: Workflow
        :returns: The workflow instance.
        """
        return serializer.deserialize_workflow(s_state, **kwargs)
| 13,077 | Python | .py | 293 | 35.488055 | 119 | 0.638708 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
# Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import logging
import re
logger = logging.getLogger('spiff.task')
class Term(object):
    """Common base class for all operators and expressions.

    Serves purely as a marker type; it defines no behavior of its own.
    """
    pass
class DotDict(dict):
    """A dict subclass that also allows attribute-style ("dot") access.

    Missing keys resolve to None (mirroring ``dict.get``), and plain dict
    values are wrapped in DotDict on access so nested lookups can chain.
    """
    def __getattr__(self, name):
        # Explicit (self, name) signature instead of the old `*args` form.
        # dict.get keeps the historical behavior of returning None for
        # missing attributes rather than raising AttributeError.
        val = dict.get(self, name)
        # Wrap only plain dicts (`type(...) is dict`) so already-wrapped
        # values and other dict subclasses pass through untouched.
        return DotDict(val) if type(val) is dict else val
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
class Attrib(Term):
    """Marks a value as the *name* of a task data attribute.

    When resolved by valueof(), an Attrib is looked up in the scope's data
    rather than being used as a literal value.
    """

    def __init__(self, name):
        self.name = name

    def serialize(self, serializer):
        """Serialize this instance with the given serializer.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: object
        :returns: The serialized object.
        """
        return serializer.serialize_attrib(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        """Restore an instance from its serialized form.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: Attrib
        :returns: The deserialized object.
        """
        return serializer.deserialize_attrib(s_state)
class PathAttrib(Term):
    """Marks a value as a '/'-separated *path* into the task data.

    When resolved by valueof(), the path is walked through nested
    dictionaries instead of the value being used literally.
    """

    def __init__(self, path):
        self.path = path
        self.name = path

    def serialize(self, serializer):
        """Serialize this instance with the given serializer.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: object
        :returns: The serialized object.
        """
        return serializer.serialize_pathattrib(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        """Restore an instance from its serialized form.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: PathAttrib
        :returns: The deserialized object.
        """
        return serializer.deserialize_pathattrib(s_state)
class Assign(Term):
    """
    Assigns a new value to an attribute. The source may be either
    a static value, or another attribute.
    """

    def __init__(self,
                 left_attribute,
                 right_attribute=None,
                 right=None,
                 **kwargs):
        """
        Constructor.

        :type left_attribute: str
        :param left_attribute: The name of the attribute to which the value
                               is assigned.
        :type right: object
        :param right: A static value that, when given, is assigned to
                      left_attribute.
        :type right_attribute: str
        :param right_attribute: When given, the attribute with the given
                                name is used as the source (instead of the
                                static value).
        :type kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        # Compare against None explicitly so falsy-but-valid static values
        # (0, False, '') are accepted as `right`; this also matches the
        # `is not None` test used in assign() below.
        if right_attribute is None and right is None:
            raise ValueError('require argument: right_attribute or right')
        if left_attribute is None:
            raise ValueError('left attribute is None')
        self.left_attribute = left_attribute
        self.right_attribute = right_attribute
        self.right = right

    def assign(self, from_obj, to_obj):
        """Copy the configured value into `to_obj`'s data.

        A static `right` value takes precedence; otherwise the value is read
        from `from_obj`'s data via `right_attribute`.
        """
        # Fetch the value of the right expression.
        if self.right is not None:
            right = self.right
        else:
            right = from_obj.get_data(self.right_attribute)
        to_obj.set_data(**{str(self.left_attribute): right})

    def serialize(self, serializer):
        """Serialize this instance with the given serializer.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: object
        :returns: The serialized object.
        """
        return serializer.serialize_assign(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        """Restore an instance from its serialized form.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: Assign
        :returns: The deserialized object.
        """
        return serializer.deserialize_assign(s_state)
def valueof(scope, op, default=None):
    """Resolve `op` against `scope`'s task data.

    - None returns `default`
    - `Attrib` is looked up by name in scope's data
    - `PathAttrib` walks its '/'-separated path through nested dicts
    - any other value is returned unchanged (treated as a literal)
    """
    if op is None:
        return default
    elif isinstance(op, Attrib):
        if op.name not in scope.data:
            # f-prefix is required here so the attribute name is interpolated.
            logger.debug(f"Attrib('{op.name}') not present in task data", extra=scope.collect_log_extras({'data': scope.data}))
        return scope.get_data(op.name, default)
    elif isinstance(op, PathAttrib):
        if not op.path:
            return default
        parts = op.path.split('/')
        data = scope.data
        for part in parts:
            if part not in data:
                logger.debug(f"PathAttrib('{op.name}') not present in task data", extra=scope.collect_log_extras({'data': scope.data}))
                return default
            data = data[part]  # move down the path
        return data
    else:
        return op
def is_number(text):
    """Return True if `text` converts cleanly to an int, else False.

    Note: float strings such as "3.5" are NOT considered numbers, because
    int() rejects them.
    """
    try:
        int(text)
    except (TypeError, ValueError):
        # Catch only the conversion failures int() actually raises, instead
        # of swallowing every exception.
        return False
    return True
class Operator(Term):
    """
    Abstract base class for all operators.
    """

    def __init__(self, *args):
        """Store the operand terms.

        Raises:
            TypeError: if no operands are given.
        """
        if not args:
            raise TypeError("Too few arguments")
        self.args = args

    def _get_values(self, task):
        # Resolve every operand against the task and normalize to strings.
        return [str(valueof(task, arg)) for arg in self.args]

    def _matches(self, task):
        raise NotImplementedError("Abstract class, do not call")

    def serialize(self, serializer):
        """Serialize this operator with the given serializer.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: object
        :returns: The serialized object.
        """
        return serializer.serialize_operator(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        """Restore an operator from its serialized form.

        :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
        :param serializer: The serializer to use.
        :rtype: Operator
        :returns: The deserialized object.
        """
        return serializer.deserialize_operator(s_state)
class Equal(Operator):
    """
    The EQUAL operator: matches when all operand values are equal.
    """

    def _matches(self, task):
        values = self._get_values(task)
        # All values are equal exactly when each one equals the first.
        first = values[0]
        return all(value == first for value in values)

    def serialize(self, serializer):
        # Delegate to the serializer's operator-specific hook.
        return serializer.serialize_operator_equal(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        return serializer.deserialize_operator_equal(s_state)
class NotEqual(Operator):
    """
    The NOT EQUAL operator: matches when any operand value differs.
    """

    def _matches(self, task):
        values = self._get_values(task)
        # Some value differs exactly when one of them differs from the first.
        first = values[0]
        return any(value != first for value in values)

    def serialize(self, serializer):
        # Delegate to the serializer's operator-specific hook.
        return serializer.serialize_operator_not_equal(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        return serializer.deserialize_operator_not_equal(s_state)
class GreaterThan(Operator):
    """
    The GREATER THAN operator: matches when int(left) > int(right).
    """

    def __init__(self, left, right):
        # Binary operator: exactly two operands.
        Operator.__init__(self, left, right)

    def _matches(self, task):
        left_value, right_value = self._get_values(task)
        return int(left_value) > int(right_value)

    def serialize(self, serializer):
        # Delegate to the serializer's operator-specific hook.
        return serializer.serialize_operator_greater_than(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        return serializer.deserialize_operator_greater_than(s_state)
class LessThan(Operator):
    """
    The LESS THAN operator: matches when int(left) < int(right).
    """

    def __init__(self, left, right):
        # Binary operator: exactly two operands.
        Operator.__init__(self, left, right)

    def _matches(self, task):
        left_value, right_value = self._get_values(task)
        return int(left_value) < int(right_value)

    def serialize(self, serializer):
        # Delegate to the serializer's operator-specific hook.
        return serializer.serialize_operator_less_than(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        return serializer.deserialize_operator_less_than(s_state)
class Match(Operator):
    """
    The regular-expression match operator: matches when the pattern is
    found (re.search) in every operand value.
    """

    def __init__(self, regex, *args):
        # Remaining args are the operands; compile the pattern once up front.
        Operator.__init__(self, *args)
        self.regex = re.compile(regex)

    def _matches(self, task):
        return all(self.regex.search(value) for value in self._get_values(task))

    def serialize(self, serializer):
        # Delegate to the serializer's operator-specific hook.
        return serializer.serialize_operator_match(self)

    @classmethod
    def deserialize(cls, serializer, s_state):
        return serializer.deserialize_operator_match(s_state)
| 10,676 | Python | .py | 295 | 28.274576 | 136 | 0.635799 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from .util.task import TaskState
from .workflow import Workflow
from .version import __version__
| 869 | Python | .py | 19 | 44.684211 | 69 | 0.791519 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
# Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from copy import deepcopy
import logging
import time
from uuid import uuid4
from .util.task import TaskState, TaskFilter, TaskIterator
from .util.deep_merge import DeepMerge
from .exceptions import WorkflowException
logger = logging.getLogger('spiff.task')
class Task(object):
"""Used internally for composing a tree that represents possible paths through a Workflow.
Attributes:
id (UUID): a unique identifierfor this task
workflow (`Workflow`): the workflow associated with this task
parent (`Task`): the parent of this task
children (list(`Task`)): the children of this task
triggered (bool): True if the task is not part of output specification of the task spec
task_spec (`TaskSpec`): the spec associated with this task
thread_id (int): a thread id for this task
data (dict): a dictionary containing data for this task
internal_data (dict): a dictionary containing information relevant to the task state or execution
last_state_change (float): the timestamp when this task last changed state
thread_id (int): a thread identifier
Note:
The `thread_id` attribute might be more accurately named `branch_id`, as it pertains only to workflow
structure (eg, branches between split and merge tasks) rather than anything to do with threaded execution.
Warning:
The `data` attribute represents the state of the data as this particular task is executed. It is copied from
its parent when the task is updated (this can behavior can be modified in the `TaskSpec.update` method).
This can be VERY resource intensive in large workflows or with lots of data.
"""
thread_id_pool = 0 # Pool for assigning a unique thread id to every new Task.
    def __init__(self, workflow, task_spec, parent=None, state=TaskState.MAYBE, id=None):
        """
        Args:
            workflow (`Workflow`): the workflow this task should be added to
            task_spec (`TaskSpec`): the spec associated with this task
        Keyword Args:
            parent (`Task`): the parent of this task
            state (`TaskState`): the state of this task (default MAYBE)
            id: an optional id (defaults to a random UUID)
        """
        self.id = id or uuid4()
        # Register with the owning workflow first so that id-based
        # parent/child lookups (used below) can resolve this task.
        workflow.tasks[self.id] = self
        self.workflow = workflow
        # Parent and children are stored as ids and resolved on access
        # through the workflow's task map (see the parent/children properties).
        self._parent = parent.id if parent is not None else None
        self._children = []
        self._state = state
        self.triggered = False
        self.task_spec = task_spec
        self.thread_id = self.__class__.thread_id_pool
        self.data = {}
        self.internal_data = {}
        self.last_state_change = time.time()
        if parent is not None:
            # Let the parent record this task as one of its children.
            self.parent._child_added_notify(self)
    @property
    def state(self):
        """`TaskState`: this task's state
        Raises:
            `WorkflowException`: If setting the state results in a "backwards" state change.
        """
        return self._state
    @state.setter
    def state(self, value):
        # Tasks may only advance through the lifecycle; a lower value would
        # be a backwards transition, which is an error. Use _set_state to
        # bypass this check deliberately.
        if value < self._state:
            raise WorkflowException(
                'state went from %s to %s!' % (TaskState.get_name(self._state), TaskState.get_name(value)),
                task_spec=self.task_spec
            )
        self._set_state(value)
    @property
    def parent(self):
        """`Task`: This task's parent task"""
        # Links are stored as ids and resolved through the workflow's task map.
        return self.workflow.tasks.get(self._parent)
    @parent.setter
    def parent(self, task):
        # Pass None to detach this task from its parent.
        self._parent = task.id if task is not None else None
    @property
    def children(self):
        """list(`Task`): This task's child tasks"""
        return [self.workflow.tasks.get(child) for child in self._children]
    @children.setter
    def children(self, tasks):
        # Only ids are stored; the tasks must already be registered with the workflow.
        self._children = [child.id for child in tasks]
@property
def depth(self):
"""The task's depth"""
depth = 0
task = self.parent
while task is not None:
depth += 1
task = task.parent
return depth
def has_state(self, state):
"""Check the state of this task.
Args:
state (`TaskState`): the state to check
Returns:
bool: `True` is the task has the state or mask
"""
return (self._state & state) != 0
    def set_data(self, **kwargs):
        """Defines the given attribute/value pairs in this task's data (shallow merge via dict.update)."""
        self.data.update(kwargs)
    def get_data(self, name, default=None):
        """Returns the value of the data field with the given name, or the given
        default value if the data field does not exist.
        Args:
            name (str): the dictionary key to return
            default (obj): a default value to return if the key does not exist
        Returns:
            the value of the key, or the default
        """
        return self.data.get(name, default)
def reset_branch(self, data):
"""Removes all descendendants of this task and set this task to be runnable.
Args:
data (dict): set the task data to these values (if None, inherit from parent task)
Returns:
list(`Task`): tasks removed from the tree
"""
logger.info(f'Branch reset', extra=self.collect_log_extras())
self.internal_data = {}
self.data = deepcopy(self.parent.data) if data is None else data
descendants = [t for t in self]
self._drop_children(force=True)
self._set_state(TaskState.FUTURE)
self.task_spec._predict(self, mask=TaskState.PREDICTED_MASK|TaskState.FUTURE)
self.task_spec._update(self)
return descendants[1:] if len(descendants) > 1 else []
def is_descendant_of(self, task):
"""Checks whether a task is an ancestor of this task.
Args:
task (`Task`): the potential ancestor
Returns:
bool: whether the task is an ancestor of this task
"""
if self.parent is None:
return False
if self.parent == task:
return True
return self.parent.is_descendant_of(task)
def find_ancestor(self, spec_name):
"""Search for an ancestor that has a task with a spec of the specified name.
Args:
spec_name (str): the name of the spec associated with the task
Returns:
`Task`: the first result (or None, if no matching task was found)
"""
if self.parent is None:
return None
if self.parent.task_spec.name == spec_name:
return self.parent
return self.parent.find_ancestor(spec_name)
    def _add_child(self, task_spec, state=TaskState.MAYBE):
        """Adds a new child and assigns the given TaskSpec to it.
        Args:
            task_spec (`TaskSpec`): the spec associated with the child task
            state (`TaskState`): the state to assign
        Returns:
            `Task`: the new child
        Raises:
            `WorkflowException`: if an invalid task addition occurs
        """
        # A predicted (speculative) task may only have predicted children;
        # adding a definite child to it would corrupt the tree.
        if self.has_state(TaskState.PREDICTED_MASK) and state & TaskState.PREDICTED_MASK == 0:
            raise WorkflowException('Attempt to add non-predicted child to predicted task', task_spec=self.task_spec)
        task = Task(self.workflow, task_spec, self, state=state)
        # Children stay on the same branch (thread) as their parent.
        task.thread_id = self.thread_id
        if state == TaskState.READY:
            # Run the ready hook immediately so the spec can prepare the task.
            task._ready()
        return task
    def _sync_children(self, task_specs, state=TaskState.MAYBE):
        """Syncs the task's children with the given list of task specs.
        - Add one child for each given `TaskSpec`, unless that child already exists.
        - Remove all children for which there is no spec in the given list, unless it is a "triggered" task.
        Notes:
            It is an error if the task has a non-predicted child that is not given in the TaskSpecs.
        Args:
            task_specs (list(`TaskSpec`)): the list of task specs that may become children
            state (`TaskState`): the state to assign
        """
        if task_specs is None:
            raise ValueError('"task_specs" argument is None')
        # Copy so removals below don't mutate the caller's list.
        new_children = task_specs[:]
        # Create a list of all children that are no longer needed.
        unneeded_children = []
        for child in self.children:
            if child.triggered:
                # Triggered tasks are never removed.
                pass
            elif child.task_spec in new_children:
                # If the task already exists, remove it from to-be-added and update its state
                new_children.remove(child.task_spec)
                if child.has_state(TaskState.NOT_FINISHED_MASK):
                    child._set_state(state)
            else:
                if child.has_state(TaskState.DEFINITE_MASK):
                    # Definite tasks must not be removed, so they HAVE to be in the given task spec list.
                    raise WorkflowException(f'removal of non-predicted child {child}', task_spec=self.task_spec)
                unneeded_children.append(child)
        # Update children accordingly
        for child in unneeded_children:
            self.workflow._remove_task(child.id)
        for task_spec in new_children:
            self._add_child(task_spec, state)
    def _child_added_notify(self, child):
        """Called by another task to let us know that a child was added."""
        self._children.append(child.id)
    def _drop_children(self, force=False):
        """Remove this task's children from the tree.
        Args:
            force (bool): when True drop every child; otherwise drop only
                unfinished children and recursively prune finished subtrees
        """
        drop = []
        for child in self.children:
            if force or child.has_state(TaskState.NOT_FINISHED_MASK):
                drop.append(child)
            else:
                # Keep a finished child, but prune its unfinished descendants.
                child._drop_children()
        for task in drop:
            self.workflow._remove_task(task.id)
def _set_state(self, value):
"""Force set the state on a task"""
if value != self.state:
elapsed = time.time() - self.last_state_change
self.last_state_change = time.time()
self._state = value
logger.info(
f'State changed to {TaskState.get_name(value)}',
extra=self.collect_log_extras({'elapsed': elapsed})
)
else:
logger.debug(f'State set to {TaskState.get_name(value)}', extra=self.collect_log_extras())
def _assign_new_thread_id(self, recursive=True):
"""Assigns a new thread id to the task."""
self.__class__.thread_id_pool += 1
self.thread_id = self.__class__.thread_id_pool
if not recursive:
return self.thread_id
for child in self:
child.thread_id = self.thread_id
return self.thread_id
    def _inherit_data(self):
        """Copies the data from the parent (deep copy, so later mutations stay isolated)."""
        self.set_data(**deepcopy(self.parent.data))
    def _set_internal_data(self, **kwargs):
        """Defines the given attribute/value pairs in this task's internal data."""
        self.internal_data.update(kwargs)
    def _get_internal_data(self, name, default=None):
        """Retrieves an internal data field, or `default` if it is absent."""
        return self.internal_data.get(name, default)
def _ready(self):
"""Marks the task as ready for execution."""
if self.has_state(TaskState.COMPLETED) or self.has_state(TaskState.CANCELLED):
return
self._set_state(TaskState.READY)
self.task_spec._on_ready(self)
def run(self):
"""Execute the task.
Call's the task spec's `TaskSpec.run` method and checks the return value.
If the return value is
- `True`: mark the task COMPLETE
- `False`: mark the task in ERROR
- `None`: mark the task STARTED
Returns:
bool: the value returned by the `TaskSpec`'s run method
See `TaskState` for more information about states.
"""
start = time.time()
retval = self.task_spec._run(self)
if retval is None:
self._set_state(TaskState.STARTED)
elif retval is False:
self.error()
else:
self.complete()
return retval
def cancel(self):
"""Cancels the item if it was not yet completed; recursively cancels its children."""
if self.has_state(TaskState.FINISHED_MASK):
for child in self.children:
child.cancel()
else:
self._set_state(TaskState.CANCELLED)
self._drop_children()
self.task_spec._on_cancel(self)
def complete(self):
"""Marks this task complete."""
self._set_state(TaskState.COMPLETED)
self.task_spec._on_complete(self)
def error(self):
"""Marks this task as error."""
self._set_state(TaskState.ERROR)
self.task_spec._on_error(self)
def trigger(self, *args):
"""Call the `TaskSpec`'s trigger method.
Args are passed directly to the task spec.
"""
self.task_spec._on_trigger(self, *args)
def collect_log_extras(self, dct=None):
"""Return logging details for this task"""
extra = {
'workflow_spec': self.workflow.spec.name,
'task_spec': self.task_spec.name,
'task_id': self.id,
'task_type': self.task_spec.__class__.__name__,
'state': TaskState.get_name(self._state),
'last_state_change': self.last_state_change,
'elapsed': 0,
'parent': None if self.parent is None else self.parent.id,
}
if dct is not None:
extra.update(dct)
if logger.level < 20:
extra.update({
'data': self.data if logger.level < 20 else None,
'internal_data': self.internal_data if logger.level < 20 else None,
})
return extra
    def __iter__(self):
        """Return a `TaskIterator` rooted at this task."""
        return TaskIterator(self)
def __repr__(self):
return f'<Task object ({self.task_spec.name}) in state {TaskState.get_name(self.state)} with id {self.id}>'
# I will probably remove these methods at some point because I hate them
def get_dump(self, indent=0, recursive=True):
"""Returns the subtree as a string for debugging.
Returns:
str: a tree view of the task (and optionally its children)
"""
dbg = (' ' * indent * 2)
dbg += '%s/' % self.id
dbg += '%s:' % self.thread_id
dbg += ' Task of %s' % self.task_spec.name
if self.task_spec.description:
dbg += ' (%s)' % self.task_spec.description
dbg += ' State: %s' % TaskState.get_name(self._state)
dbg += ' Children: %s' % len(self.children)
if recursive:
for child in self.children:
dbg += '\n' + child.get_dump(indent + 1)
return dbg
def dump(self, indent=0):
"""Prints the subtree as a string for debugging."""
print(self.get_dump())
| 15,896 | Python | .py | 359 | 34.835655 | 117 | 0.619643 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
982 | exceptions.py | sartography_SpiffWorkflow/SpiffWorkflow/exceptions.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
class SpiffWorkflowException(Exception):
    """Root of the SpiffWorkflow exception hierarchy."""

    def __init__(self, msg):
        super().__init__(msg)
        self.notes = []

    def add_note(self, note):
        """Attach an explanatory note to this exception.

        Mirrors the ``add_note`` API introduced in Python 3.11 so older
        interpreters get the same behavior; can be removed once support
        for versions prior to 3.11 is dropped.
        """
        self.notes.append(note)

    def __str__(self):
        base = super().__str__()
        return base + ". " + ". ".join(self.notes)
class WorkflowException(SpiffWorkflowException):
    """An error associated with a specific task specification."""

    def __init__(self, message, task_spec=None):
        """
        Standard exception class.

        :param message: a human-readable error message
        :type message: string
        :param task_spec: the task spec that threw the exception
        :type task_spec: TaskSpec
        """
        super().__init__(str(message))
        # Points to the TaskSpec that generated the exception.
        self.task_spec = task_spec
class TaskNotFoundException(WorkflowException):
    """Raised when a lookup for a task yields no result."""
    pass
| 1,911 | Python | .py | 48 | 35.041667 | 73 | 0.699029 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
983 | Gate.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Gate.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from SpiffWorkflow.util.task import TaskState, TaskIterator
from .base import TaskSpec
class Gate(TaskSpec):
    """A task that can only execute once another named task has completed.

    The gate remains WAITING until every same-thread instance of the task
    named by ``context`` has completed; only then does it become ready.

    If more than one input is connected, the task performs an implicit
    multi merge.

    If more than one output is connected, the task performs an implicit
    parallel split.
    """

    def __init__(self, wf_spec, name, context, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  context: str
        :param context: The name of the task that needs to complete before
                        this task can execute.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        super().__init__(wf_spec, name, **kwargs)
        self.context = context

    def _update_hook(self, my_task):
        super()._update_hook(my_task)
        # Look for any same-thread instance of the context task that has
        # not completed yet; if one exists, keep waiting.
        for task in TaskIterator(my_task.workflow.task_tree, spec_name=self.context):
            if task.thread_id != my_task.thread_id:
                continue
            if not task.has_state(TaskState.COMPLETED):
                my_task._set_state(TaskState.WAITING)
                return
        return True

    def serialize(self, serializer):
        return serializer.serialize_gate(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        return serializer.deserialize_gate(wf_spec, s_state)
| 2,444 | Python | .py | 58 | 35.948276 | 85 | 0.691628 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
984 | Transform.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Transform.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import logging
from .base import TaskSpec
logger = logging.getLogger('spiff.task')
class Transform(TaskSpec):

    """
    This class implements a task that transforms input/output data.
    """

    def __init__(self, wf_spec, name, transforms=None, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  transforms: list
        :param transforms: The commands that this task will execute to
                        transform data. The commands will be executed using the
                        python 'exec' function. Accessing inputs and outputs is
                        achieved by referencing the my_task.* and self.*
                        variables'
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        # List of Python source strings, executed in order on each update.
        self.transforms = transforms

    def _update_hook(self, my_task):
        # Run each transform in this method's local namespace so the
        # transform source can reference ``my_task`` and ``self``.
        super()._update_hook(my_task)
        if self.transforms:
            for transform in self.transforms:
                logger.debug('Execute transform', extra=my_task.collect_log_extras({'transform': transform}))
                # SECURITY: ``exec`` runs arbitrary Python; transform strings
                # must never come from untrusted input.
                exec(transform)
        return True

    def serialize(self, serializer):
        s_state = serializer.serialize_simple(self)
        s_state['transforms'] = self.transforms
        return s_state

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        spec = Transform(wf_spec, s_state['name'])
        serializer.deserialize_task_spec(wf_spec, s_state, spec=spec)
        spec.transforms = s_state['transforms']
        return spec
| 2,594 | Python | .py | 60 | 36.083333 | 109 | 0.673146 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
985 | ReleaseMutex.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/ReleaseMutex.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from .base import TaskSpec
class ReleaseMutex(TaskSpec):
    """A task that releases a named mutex (lock).

    Together with its acquiring counterpart this protects a section of
    the workflow from being accessed by other sections.

    If more than one input is connected, the task performs an implicit
    multi merge.

    If more than one output is connected, the task performs an implicit
    parallel split.
    """

    def __init__(self, wf_spec, name, mutex, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  mutex: str
        :param mutex: The name of the mutex that should be released.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        super().__init__(wf_spec, name, **kwargs)
        self.mutex = mutex

    def _run_hook(self, my_task):
        # Look up the named mutex on the workflow and release it.
        my_task.workflow._get_mutex(self.mutex).unlock()
        return True

    def serialize(self, serializer):
        return serializer.serialize_release_mutex(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        return serializer.deserialize_release_mutex(wf_spec, s_state)
| 2,133 | Python | .py | 51 | 36.803922 | 73 | 0.709117 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
986 | ThreadSplit.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/ThreadSplit.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from .base import TaskSpec
from .ThreadStart import ThreadStart
from ..operators import valueof
class ThreadSplit(TaskSpec):

    """
    When executed, this task performs a split on the current my_task.
    The number of outgoing my_tasks depends on the runtime value of a
    specified data field.
    If more than one input is connected, the task performs an implicit
    multi merge.

    This task has one or more inputs and may have any number of outputs.
    """

    def __init__(self,
                 wf_spec,
                 name,
                 times=1,
                 suppress_threadstart_creation=False,
                 **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec`
        :param wf_spec: A reference to the workflow specification.
        :type  name: string
        :param name: A name for the task.
        :type  times: int or :class:`SpiffWorkflow.operators.Term`
        :param times: The number of tasks to create.
        :type  suppress_threadstart_creation: bool
        :param suppress_threadstart_creation: Don't create a ThreadStart,
            because the deserializer is about to.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        if times is None:
            raise ValueError('times argument is required')
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.times = times
        if not suppress_threadstart_creation:
            # The internal ThreadStart is wired in as this spec's output;
            # outgoing connections are routed through it (see connect()).
            self.thread_starter = ThreadStart(wf_spec, **kwargs)
            self._outputs.append(self.thread_starter.name)
            self.thread_starter._connect_notify(self)
        else:
            # Deserialization path: _predict_hook reconnects the starter
            # from outputs[0] later.
            self.thread_starter = None

    def connect(self, taskspec):
        """
        Connect the *following* task to this one. In other words, the
        given task is added as an output task.

        task -- the task to connect to.
        """
        # Outgoing tasks hang off the internal ThreadStart, not off this
        # spec directly.
        self.thread_starter._outputs.append(taskspec.name)
        taskspec._connect_notify(self.thread_starter)

    def _get_activated_tasks(self, my_task, destination):
        """
        Returns the list of tasks that were activated in the previous
        call of execute(). Only returns tasks that point towards the
        destination task, i.e. those which have destination as a
        descendant.

        my_task -- the task of this TaskSpec
        destination -- the child task
        """
        # Delegate to the ThreadStart ancestor of the destination task.
        task = destination.find_ancestor(self.thread_starter.name)
        return self.thread_starter._get_activated_tasks(task, destination)

    def _get_activated_threads(self, my_task):
        """
        Returns the list of threads that were activated in the previous
        call of execute().

        my_task -- the task of this TaskSpec
        """
        return my_task.children

    def _on_trigger(self, my_task):
        """
        May be called after execute() was already completed to create an
        additional outbound task.
        """
        # NOTE(review): iterates self.outputs — presumably just the internal
        # ThreadStart appended in __init__; confirm for externally added outputs.
        for output in self.outputs:
            new_task = my_task.add_child(output, TaskState.READY)
            new_task.triggered = True

    def _get_predicted_outputs(self, my_task):
        # ``times`` may be a literal or an operator resolved against task data.
        split_n = int(valueof(my_task, self.times))
        return [self.thread_starter] * split_n

    def _predict_hook(self, my_task):
        # if we were created with thread_starter suppressed, connect it now.
        if self.thread_starter is None:
            self.thread_starter = self.outputs[0]
        outputs = self._get_predicted_outputs(my_task)
        if my_task.has_state(TaskState.DEFINITE_MASK):
            my_task._sync_children(outputs, TaskState.FUTURE)
        else:
            my_task._sync_children(outputs, TaskState.LIKELY)

    def _run_hook(self, my_task):
        # Create one child per predicted output (all ThreadStart instances).
        outputs = self._get_predicted_outputs(my_task)
        my_task._sync_children(outputs, TaskState.FUTURE)
        return True

    def serialize(self, serializer):
        return serializer.serialize_thread_split(self)

    @classmethod
    def deserialize(self, serializer, wf_spec, s_state):
        return serializer.deserialize_thread_split(wf_spec, s_state)
| 4,970 | Python | .py | 116 | 35.163793 | 76 | 0.666529 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
987 | ExclusiveChoice.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/ExclusiveChoice.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from ..exceptions import WorkflowException
from .MultiChoice import MultiChoice
class ExclusiveChoice(MultiChoice):
    """An exclusive choice (an if condition) where exactly one output is taken.

    The first condition that matches selects the outgoing task; when no
    condition matches, a previously configured default task is used.

    It has one or more inputs and two or more outputs.
    """

    def __init__(self, wf_spec, name, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        super().__init__(wf_spec, name, **kwargs)
        self.default_task_spec = None

    def connect(self, taskspec):
        """
        Connects the task spec that is executed if no other condition
        matches.

        :type  taskspec: TaskSpec
        :param taskspec: The following task spec.
        """
        self.default_task_spec = taskspec.name
        super().connect(taskspec)

    def test(self):
        super().test()
        if self.default_task_spec is None:
            raise WorkflowException('A default output is required.', task_spec=self)

    def _run_hook(self, my_task):
        # Start from the default output; the first matching condition
        # overrides it.
        spec = my_task.workflow.spec
        output = spec.get_task_spec_from_name(self.default_task_spec)
        for condition, spec_name in self.cond_task_specs:
            if condition is not None and condition._matches(my_task):
                output = spec.get_task_spec_from_name(spec_name)
                break

        if output is None:
            raise WorkflowException(f'No conditions satisfied for {my_task.task_spec.name}', task_spec=self)

        my_task._sync_children([output], TaskState.FUTURE)
        for child in my_task.children:
            child.task_spec._predict(child, mask=TaskState.FUTURE|TaskState.PREDICTED_MASK)
        return True

    def serialize(self, serializer):
        return serializer.serialize_exclusive_choice(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        return serializer.deserialize_exclusive_choice(wf_spec, s_state)
| 3,156 | Python | .py | 70 | 38.542857 | 108 | 0.696604 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
988 | Join.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Join.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from SpiffWorkflow.task import Task
from SpiffWorkflow.util.task import TaskState, TaskIterator
from ..exceptions import WorkflowException
from .base import TaskSpec
from ..operators import valueof
class Join(TaskSpec):

    """
    A task for synchronizing branches that were previously split using a
    conditional task, such as MultiChoice. It has two or more incoming
    branches and one or more outputs.

    Keep in mind that each Join spec may have multiple corresponding
    Task objects::

        - When using the MultiInstance task
        - When using the ThreadSplit task

    When using the MultiInstance pattern, Join works across all
    the resulting task instances. When using the ThreadSplit
    pattern, Join ignores instances from another thread.

    A Join task may enter the following states::

        - FUTURE, LIKELY, or MAYBE: These are the initial predicted states.

        - WAITING: This state is reached as soon as at least one of the
          predecessors has completed.

        - READY: All predecessors have completed. If multiple tasks within
          the thread reference the same Join (e.g. if MultiInstance is used),
          this state is only reached on one of the tasks; all other tasks go
          directly from WAITING to completed.

        - COMPLETED: All predecessors have completed, and
          :class:`Task.complete()` was called.

    The state may also change directly from WAITING to COMPLETED if the
    Trigger pattern is used.
    """

    def __init__(self,
                 wf_spec,
                 name,
                 split_task=None,
                 threshold=None,
                 cancel=False,
                 **kwargs):
        """
        Constructor.

        :type  wf_spec: :class:`SpiffWorkflow.specs.WorkflowSpec`
        :param wf_spec: A reference to the parent (usually a workflow).
        :type  name: string
        :param name: A name for the task.
        :type  split_task: str or None
        :param split_task: The name of the task spec that was previously
                           used to split the branch. If this is None,
                           the most recent branch split is merged.
        :type  threshold: int, :class:`SpiffWorkflow.operators.Attrib`, or None
        :param threshold: Specifies how many incoming branches need to
                          complete before the task triggers. When the limit
                          is reached, the task fires but still expects all
                          other branches to complete.
                          You may also pass an attribute, in which case
                          the value is resolved at runtime.
                          Passing None means all incoming branches.
        :type  cancel: bool
        :param cancel: When True, any remaining incoming branches are
                       cancelled as soon as the discriminator is activated.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        super(Join, self).__init__(wf_spec, name, **kwargs)
        self.split_task = split_task
        self.threshold = threshold
        self.cancel_remaining = cancel

    def _check_threshold_unstructured(self, my_task):
        # This method is extremely poorly named. It is called where there is no split task, but whether or not
        # there is a known split is actually irrelevant. The distinction that actually needs to be made is
        # "Do we have to look at unfinshed tasks to find out if any of the might pass through this task?" vs
        # "Can we make a distinction solely by looking at our own completed inputs?"

        # The default threshold is the number of inputs.
        threshold = valueof(my_task, self.threshold)
        if threshold is None:
            threshold = len(self.inputs)

        # Find all places where this task spec is used and check whether enough inputs have completed to meet the threshold
        # Omit building the list of waiting tasks unless they need to be cancelled if the threshold is met
        waiting_tasks = []
        completed = 0
        spec_names = [ts.name for ts in self.inputs]
        for task in TaskIterator(my_task.workflow.task_tree, end_at_spec=self.name):
            if not task.task_spec.name in spec_names:
                continue
            # NOTE(review): a task with no parent is counted as completed here
            # — presumably a detached/root edge case; confirm the intent.
            if task.parent is None or task.has_state(TaskState.COMPLETED):
                completed += 1
            elif not task.has_state(TaskState.FINISHED_MASK) and self.cancel_remaining:
                waiting_tasks.append(task)
            if completed >= threshold:
                may_fire = True
                if not self.cancel_remaining:
                    # No need to keep collecting tasks we won't cancel.
                    break
        else:
            may_fire = False

        # If the threshold was reached, get ready to fire.
        return may_fire, waiting_tasks

    def _branch_may_merge(self, task):
        # True if this branch could still reach the join.
        for child in TaskIterator(task, end_at_spec=self.name):
            # Ignore tasks that were created by a trigger.
            if child.triggered:
                continue
            # Merge found.
            if child.task_spec == self:
                return True
            # If the task is predicted with less outputs than he has
            # children, that means the prediction may be incomplete (for
            # example, because a prediction is not yet possible at this time).
            if child.has_state(TaskState.PREDICTED_MASK) and len(child.task_spec.outputs) > len(child.children):
                return True
        return False

    def _branch_is_complete(self, task):
        # Determine whether that branch is now completed by checking whether
        # it has any waiting items other than myself in it.
        for child in TaskIterator(task, state=TaskState.NOT_FINISHED_MASK, end_at_spec=self.name):
            if child.task_spec != self:
                return False
        return True

    def _check_threshold_structured(self, my_task):
        # Retrieve a list of all activated tasks from the associated task that did the conditional parallel split.
        split_task = my_task.find_ancestor(self.split_task)
        if split_task is None:
            raise WorkflowException(f'Split task {self.split_task} which was not reached', task_spec=self)
        tasks = split_task.task_spec._get_activated_tasks(split_task, my_task)

        # The default threshold is the number of branches that were started.
        threshold = valueof(my_task, self.threshold)
        if threshold is None:
            threshold = len(tasks)

        # Look up which tasks have already completed.
        waiting_tasks = []
        completed = 0
        for task in tasks:
            if self._branch_is_complete(task):
                completed += 1
            elif not self._branch_may_merge(task):
                # A branch that can never reach this join counts as done.
                completed += 1
            else:
                waiting_tasks.append(task)

        # If the threshold was reached, get ready to fire.
        return completed >= threshold, waiting_tasks

    def _update_hook(self, my_task):
        # Check whether enough incoming branches have completed.
        my_task._inherit_data()
        if self.split_task is None:
            may_fire, waiting_tasks = self._check_threshold_unstructured(my_task)
        else:
            may_fire, waiting_tasks = self._check_threshold_structured(my_task)

        if may_fire:
            # If this is a cancelling join, cancel all incoming branches except for the one that just completed.
            if self.cancel_remaining:
                for task in waiting_tasks:
                    task.cancel()
            # Update the state of our child objects.
            self._do_join(my_task)
        elif not my_task.has_state(TaskState.FINISHED_MASK):
            my_task._set_state(TaskState.WAITING)

        return may_fire

    def _find_tasks(self, my_task):
        # Collect all same-thread instances of this join, excluding the
        # subtree below my_task itself.
        split_task = my_task.find_ancestor(self.split_task) or my_task.workflow.task_tree

        # Identify all corresponding task instances within the thread.
        thread_tasks = []
        for task in TaskIterator(split_task, spec_name=self.name, end_at_spec=self.name):
            # Ignore tasks from other threads.
            if task.thread_id != my_task.thread_id:
                continue
            # Ignore my outgoing branches.
            if self.split_task and task.is_descendant_of(my_task):
                continue
            # We have found a matching instance.
            thread_tasks.append(task)
        return thread_tasks

    def _do_join(self, my_task):
        # Execution will continue from this task; mark others as cancelled
        for task in self._find_tasks(my_task):
            if task != my_task:
                task._set_state(TaskState.CANCELLED)
                task._drop_children()

    def _on_trigger(self, my_task):
        """
        May be called to fire the Join before the incoming branches are
        completed.
        """
        # Keep only the most recently changed instance alive and make it ready.
        tasks = sorted(self._find_tasks(my_task), key=lambda t: t.last_state_change)
        for task in tasks[:-1]:
            task._set_state(TaskState.CANCELLED)
            task._drop_children()
        tasks[-1]._ready()

    def serialize(self, serializer):
        return serializer.serialize_join(self)

    @classmethod
    def deserialize(self, serializer, wf_spec, s_state):
        return serializer.deserialize_join(wf_spec, s_state)
| 10,251 | Python | .py | 207 | 39.241546 | 123 | 0.643749 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
989 | WorkflowSpec.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/WorkflowSpec.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..specs.StartTask import StartTask
class WorkflowSpec(object):
"""
This class represents the specification of a workflow.
"""
def __init__(self, name=None, filename=None, addstart=False):
"""
Constructor.
"""
self.name = name or ''
self.description = ''
self.file = filename
self.task_specs = dict()
self.start = None
if addstart:
self.start = StartTask(self)
def _add_notify(self, task_spec):
"""
Called by a task spec when it was added into the workflow.
"""
if task_spec.name in self.task_specs:
raise KeyError('Duplicate task spec name: ' + task_spec.name)
self.task_specs[task_spec.name] = task_spec
def get_task_spec_from_name(self, name):
"""
Returns the task with the given name.
:type name: str
:param name: The name of the task spec.
:rtype: TaskSpec
:returns: The task spec with the given name.
"""
return self.task_specs.get(name)
def validate(self):
"""Checks integrity of workflow and reports any problems with it.
Detects:
- loops (tasks that wait on each other in a loop)
:returns: empty list if valid, a list of errors if not
"""
results = []
from ..specs.Join import Join
def recursive_find_loop(task, history):
current = history[:]
current.append(task)
if isinstance(task, Join):
if task in history:
msg = "Found loop with '%s': %s then '%s' again" % (
task.name, '->'.join([p.name for p in history]),
task.name)
raise Exception(msg)
for predecessor in task.inputs:
recursive_find_loop(predecessor, current)
for parent in task.inputs:
recursive_find_loop(parent, current)
for task_id, task in list(self.task_specs.items()):
# Check for cyclic waits
try:
recursive_find_loop(task, [])
except Exception as exc:
results.append(exc.__str__())
# Check for disconnected tasks
if not task.inputs and task.name not in ['Start', 'Root']:
if task.outputs:
results.append(f"Task '{task.name}' is disconnected (no inputs)")
else:
results.append(f"Task '{task.name}' is not being used")
return results
def serialize(self, serializer, **kwargs):
"""
Serializes the instance using the provided serializer.
:type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
:param serializer: The serializer to use.
:type kwargs: dict
:param kwargs: Passed to the serializer.
:rtype: object
:returns: The serialized object.
"""
return serializer.serialize_workflow_spec(self, **kwargs)
@classmethod
def deserialize(cls, serializer, s_state, **kwargs):
"""
Deserializes a WorkflowSpec instance using the provided serializer.
:type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
:param serializer: The serializer to use.
:type s_state: object
:param s_state: The serialized workflow specification object.
:type kwargs: dict
:param kwargs: Passed to the serializer.
:rtype: WorkflowSpec
:returns: The resulting instance.
"""
return serializer.deserialize_workflow_spec(s_state, **kwargs)
def get_dump(self, verbose=False):
    """Returns a human-readable text dump of the spec's task graph.

    :type verbose: bool
    :param verbose: If True, also list each task's inputs and outputs.
    :rtype: str
    :returns: The formatted dump, rooted at the start task.
    """
    visited = set()

    def describe(spec):
        # One-line header identifying a spec by name, type and identity.
        return '%s (%s:%s)\n' % (spec.name, spec.__class__.__name__, hex(id(spec)))

    def walk(spec, indent):
        # A spec already printed elsewhere is referenced, not expanded again.
        if spec in visited:
            return '[shown earlier] ' + describe(spec)
        visited.add(spec)
        text = describe(spec)
        if verbose:
            if spec.inputs:
                links = ','.join('%s (%s)' % (t.name, hex(id(t))) for t in spec.inputs)
                text += indent + '- IN: ' + links + '\n'
            if spec.outputs:
                links = ','.join('%s (%s)' % (t.name, hex(id(t))) for t in spec.outputs)
                text += indent + '- OUT: ' + links + '\n'
        last = len(spec.outputs) - 1
        for i, child in enumerate(spec.outputs):
            # Continue the tree rail while more siblings follow.
            branch = ' | ' if i < last else '   '
            text += indent + ' --> ' + walk(child, indent + branch)
        return text

    return walk(self.start, '')
def dump(self):
    """Prints the task spec tree dump (see :meth:`get_dump`) to stdout."""
    text = self.get_dump()
    print(text)
| 6,072 | Python | .py | 145 | 30.910345 | 108 | 0.560149 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
990 | Merge.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Merge.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from .Join import Join
from ..util.deep_merge import DeepMerge
class Merge(Join):
    """Same as Join, but merges the data of *all* inputs instead of just the parents'.

    Note: data fields that have conflicting names will be overwritten.
    """

    def _do_join(self, my_task):
        """Merge the data of every task produced by each input spec into
        ``my_task.data`` (in input order), then perform the normal join.

        :param my_task: The task in which this method is executed.
        """
        for input_spec in self.inputs:
            tasks = [task for task in my_task.workflow.task_tree
                     if task.task_spec is input_spec]
            for task in tasks:
                # Later inputs win on key conflicts (dict-deep merge).
                DeepMerge.merge(my_task.data, task.data)
        # Zero-argument super() -- equivalent to the legacy
        # super(Merge, self) spelling used before.
        return super()._do_join(my_task)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        """Restore a Merge spec from its serialized state.

        (First parameter renamed from the misleading ``self`` to the
        conventional ``cls`` for a classmethod; callers are unaffected.)
        """
        return serializer.deserialize_merge(wf_spec, s_state)
| 1,564 | Python | .py | 34 | 41.470588 | 72 | 0.726855 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
991 | ThreadMerge.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/ThreadMerge.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from SpiffWorkflow.util.task import TaskState, TaskIterator
from ..exceptions import WorkflowException
from ..operators import valueof
from ..specs.Join import Join
class ThreadMerge(Join):
    """
    This class represents a task for synchronizing branches that were
    previously split using a ThreadSplit.

    It has two or more incoming branches and one or more outputs.
    """

    def __init__(self, wf_spec, name, split_task, **kwargs):
        """
        Constructor.

        :type  wf_spec: :class:`SpiffWorkflow.specs.WorkflowSpec`
        :param wf_spec: A reference to the parent (usually a workflow).
        :type  name: string
        :param name: A name for the task.
        :type  split_task: str
        :param split_task: The name of the task spec that was previously
                           used to split the branch.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.Join`.
        """
        Join.__init__(self, wf_spec, name, split_task, **kwargs)

    def _start(self, my_task):
        """Decide whether this merge is ready to fire.

        :rtype: bool
        :returns: True if the threshold is reached (or already READY),
                  False otherwise.
        """
        # If the threshold was already reached, there is nothing else to do.
        if my_task.has_state(TaskState.COMPLETED):
            return False
        if my_task.has_state(TaskState.READY):
            return True

        # Retrieve a list of all activated tasks from the associated task
        # that did the conditional parallel split.
        split_task = my_task.find_ancestor(self.split_task)
        if split_task is None:
            # BUG FIX: the message previously mixed %-style and f-string
            # formatting, leaving a literal '%s' in the error text.
            raise WorkflowException(
                f'Join with {self.split_task}, which was not reached',
                task_spec=self)
        tasks = split_task.task_spec._get_activated_threads(split_task)

        # The default threshold is the number of threads that were started.
        threshold = valueof(my_task, self.threshold)
        if threshold is None:
            threshold = len(tasks)

        # Look up which tasks have already completed.
        waiting_tasks = []
        completed = 0
        for task in tasks:
            # Refresh path prediction.
            task.task_spec._predict(task)
            if self._branch_is_complete(task):
                completed += 1
            else:
                waiting_tasks.append(task)

        # If the threshold was reached, get ready to fire.
        if completed >= threshold:
            # If this is a cancelling join, cancel all incoming branches,
            # except for the one that just completed.
            if self.cancel_remaining:
                for task in waiting_tasks:
                    task.cancel()
            return True

        # We do NOT set the task state to COMPLETED, because in
        # case all other incoming tasks get cancelled (or never reach
        # the ThreadMerge for other reasons, such as reaching a stub branch),
        # we need to revisit it.
        return False

    def _update_hook(self, my_task):
        my_task._inherit_data()
        if not self._start(my_task):
            my_task._set_state(TaskState.WAITING)
            return

        split_task = my_task.find_ancestor(self.split_task)

        # Find the inbound task that was completed last.
        last_changed = None
        tasks = []
        for task in TaskIterator(split_task, spec_name=self.name):
            if self.split_task and task.is_descendant_of(my_task):
                continue
            changed = task.parent.last_state_change
            if last_changed is None or changed > last_changed.parent.last_state_change:
                last_changed = task
            tasks.append(task)

        # Mark all tasks in this thread that reference this task as
        # completed, except for the last-changed one, which becomes READY.
        for task in tasks:
            if task == last_changed:
                self.update_event.emit(my_task.workflow, my_task)
                task._ready()
            else:
                task._set_state(TaskState.COMPLETED)
                task._drop_children()

    def serialize(self, serializer):
        return serializer.serialize_thread_merge(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        # First parameter renamed self -> cls per classmethod convention.
        return serializer.deserialize_thread_merge(wf_spec, s_state)
| 5,086 | Python | .py | 114 | 35.517544 | 114 | 0.643982 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
992 | Trigger.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Trigger.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from .base import TaskSpec
from ..operators import valueof
class Trigger(TaskSpec):
    """
    A task that fires a trigger event on one or more other tasks.

    If more than one input is connected, the task performs an implicit
    multi merge.

    If more than one output is connected, the task performs an implicit
    parallel split.
    """

    def __init__(self, wf_spec, name, context, times=1, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  context: list(str)
        :param context: A list of the names of tasks that are to be triggered.
        :type  times: int or :class:`SpiffWorkflow.operators.Term`
        :param times: The number of signals before the trigger fires.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.context = context
        self.times = times
        # Number of signals queued via _on_trigger(); consumed by _run_hook().
        self.queued = 0

    def _on_trigger(self, my_task):
        """
        Enqueue a trigger, such that this task triggers multiple times later
        when _on_complete() is called.
        """
        self.queued += 1
        # Any already-completed instance of this spec (in the same thread)
        # must be reset so the queued trigger can fire again.
        same_thread = (t for t in my_task.workflow.task_tree
                       if t.thread_id == my_task.thread_id)
        for task in same_thread:
            if task.task_spec == self and task.has_state(TaskState.COMPLETED):
                task._set_state(TaskState.FUTURE)
                task._ready()

    def _run_hook(self, my_task):
        """
        Fire the trigger on every task named in ``self.context``, once per
        requested repetition (``times`` plus any queued signals).

        :type  my_task: Task
        :param my_task: A task in which this method is executed.
        :rtype:  bool
        :returns: True on success, False otherwise.
        """
        repetitions = int(valueof(my_task, self.times, 1)) + self.queued
        for _ in range(repetitions):
            for task_name in self.context:
                spec = my_task.workflow.spec.get_task_spec_from_name(task_name)
                spec._on_trigger(my_task)
        self.queued = 0
        return True

    def serialize(self, serializer):
        return serializer.serialize_trigger(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state, **kwargs):
        """
        Deserializes the trigger using the provided serializer.
        """
        return serializer.deserialize_trigger(wf_spec, s_state, **kwargs)
| 3,586 | Python | .py | 87 | 33.287356 | 84 | 0.642775 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
993 | MultiChoice.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/MultiChoice.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from ..exceptions import WorkflowException
from .base import TaskSpec
class MultiChoice(TaskSpec):
    """
    This class represents an if condition where multiple conditions may match
    at the same time, creating multiple outgoing branches.

    This task has one or more inputs, and one or more incoming branches.
    This task has one or more outputs.
    """

    def __init__(self, wf_spec, name, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        super(MultiChoice, self).__init__(wf_spec, name, **kwargs)
        # List of (condition, task spec name) pairs, in connection order.
        self.cond_task_specs = []
        # Optional subset of output names selected via a Choose trigger.
        self.choice = None

    def connect(self, taskspec):
        """
        Convenience wrapper around connect_if() where condition is set to None.
        """
        return self.connect_if(None, taskspec)

    def connect_if(self, condition, task_spec):
        """
        Connects a taskspec that is executed if the condition DOES match.

        condition -- a condition (Condition)
        taskspec -- the conditional task spec
        """
        self._outputs.append(task_spec.name)
        self.cond_task_specs.append((condition, task_spec.name))
        task_spec._connect_notify(self)

    def test(self):
        """
        Checks whether all required attributes are set. Throws an exception
        if an error was detected.
        """
        TaskSpec.test(self)
        if len(self.cond_task_specs) < 1:
            raise WorkflowException('At least one output required.', task_spec=self)
        for condition, name in self.cond_task_specs:
            if name is None:
                raise WorkflowException('Condition with no task spec.', task_spec=self)
            task_spec = self._wf_spec.get_task_spec_from_name(name)
            if task_spec is None:
                msg = 'Condition leads to non-existent task ' + repr(name)
                raise WorkflowException(msg, task_spec=self)
            # NOTE: a trailing no-op 'if condition is None: continue' was
            # removed here -- at the end of the loop body it had no effect.

    def _on_trigger(self, my_task, choice):
        """
        Lets a caller narrow down the choice by using a Choose trigger.
        """
        self.choice = choice
        # The caller needs to make sure that predict() is called.

    def _predict_hook(self, my_task):
        # Partition outputs into unconditional (always taken) and
        # conditional (may be taken) for state prediction.
        conditional, unconditional = [], []
        for condition, output in self.cond_task_specs:
            if self.choice is not None and output not in self.choice:
                continue
            if condition is None:
                unconditional.append(my_task.workflow.spec.get_task_spec_from_name(output))
            else:
                conditional.append(my_task.workflow.spec.get_task_spec_from_name(output))
        state = TaskState.MAYBE if my_task.state == TaskState.MAYBE else TaskState.LIKELY
        my_task._sync_children(unconditional, state)
        for spec in conditional:
            my_task._add_child(spec, TaskState.MAYBE)

    def _get_matching_outputs(self, my_task):
        """Return (matching, default) output specs for the current task data."""
        matches, defaults = [], []
        for condition, output in self.cond_task_specs:
            if self.choice is not None and output not in self.choice:
                continue
            if condition is None:
                defaults.append(my_task.workflow.spec.get_task_spec_from_name(output))
            elif condition._matches(my_task):
                matches.append(my_task.workflow.spec.get_task_spec_from_name(output))
        return matches, defaults

    def _run_hook(self, my_task):
        """Runs the task. Should not be called directly."""
        matches, defaults = self._get_matching_outputs(my_task)
        my_task._sync_children(matches + defaults, TaskState.FUTURE)
        for child in my_task.children:
            child.task_spec._predict(child, mask=TaskState.FUTURE | TaskState.PREDICTED_MASK)
        return True

    def serialize(self, serializer):
        return serializer.serialize_multi_choice(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        # First parameter renamed self -> cls per classmethod convention.
        return serializer.deserialize_multi_choice(wf_spec, s_state)
| 5,135 | Python | .py | 113 | 37.292035 | 91 | 0.661207 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
994 | AcquireMutex.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/AcquireMutex.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from .base import TaskSpec
class AcquireMutex(TaskSpec):
    """
    A task that acquires a mutex (lock), protecting a section of the
    workflow from being accessed by other sections.

    If more than one input is connected, the task performs an implicit
    multi merge.

    If more than one output is connected, the task performs an implicit
    parallel split.
    """

    def __init__(self, wf_spec, name, mutex, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  mutex: str
        :param mutex: The name of the mutex that should be acquired.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.mutex = mutex

    def _update_hook(self, my_task):
        super()._update_hook(my_task)
        mutex = my_task.workflow._get_mutex(self.mutex)
        # Guard clause: if the lock is taken, wait and try again later.
        if not mutex.testandset():
            my_task._set_state(TaskState.WAITING)
            return
        self.update_event.emit(my_task.workflow, my_task)
        return True

    def serialize(self, serializer):
        return serializer.serialize_acquire_mutex(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        return serializer.deserialize_acquire_mutex(wf_spec, s_state)
| 2,341 | Python | .py | 56 | 36.357143 | 73 | 0.70123 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
995 | Choose.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Choose.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from .base import TaskSpec
from .Trigger import Trigger
class Choose(Trigger):
    """
    This class implements a task that causes an associated MultiChoice
    task to select the tasks with the specified name.

    If more than one input is connected, the task performs an implicit
    multi merge.

    If more than one output is connected, the task performs an implicit
    parallel split.
    """

    def __init__(self, wf_spec, name, context, choice=None, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  context: str
        :param context: The name of the MultiChoice that is instructed to
                        select the specified outputs.
        :type  choice: list(str)
        :param choice: The list of task spec names that is selected.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        # HACK: inherit from TaskSpec (not Trigger) on purpose.
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.context = context
        # Replaced the error-prone 'x and y or z' idiom with an explicit
        # conditional expression (same result, no falsy-value pitfall).
        self.choice = choice if choice is not None else []

    def _run_hook(self, my_task):
        """Trigger the associated MultiChoice (same thread) with self.choice."""
        context = my_task.workflow.spec.get_task_spec_from_name(self.context)
        triggered = []
        for task in my_task.workflow.task_tree:
            if task.thread_id != my_task.thread_id:
                continue
            if task.task_spec == context:
                task.trigger(self.choice)
                triggered.append(task)
        for task in triggered:
            context._predict(task)
        return True

    def serialize(self, serializer):
        return serializer.serialize_choose(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        # First parameter renamed self -> cls per classmethod convention.
        return serializer.deserialize_choose(wf_spec, s_state)
| 2,763 | Python | .py | 65 | 35.953846 | 77 | 0.68241 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
996 | MultiInstance.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/MultiInstance.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from .base import TaskSpec
from ..operators import valueof
class MultiInstance(TaskSpec):
    """
    When executed, this task performs a split on the current task.
    The number of outgoing tasks depends on the runtime value of a
    specified data field.

    If more than one input is connected, the task performs an implicit
    multi merge.

    This task has one or more inputs and may have any number of outputs.
    """

    def __init__(self, wf_spec, name, times, **kwargs):
        """
        Constructor.

        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: str
        :param name: The name of the task spec.
        :type  times: int or :class:`SpiffWorkflow.operators.Term`
        :param times: The number of tasks to create.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        if times is None:
            raise ValueError('times argument is required')
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.times = times

    def _find_my_task(self, task):
        # Locate the task instance (same thread as `task`) whose spec is self.
        for candidate in task.workflow.task_tree:
            if candidate.thread_id != task.thread_id:
                continue
            if candidate.task_spec == self:
                return candidate
        return None

    def _on_trigger(self, task_spec):
        """
        May be called after execute() was already completed to create an
        additional outbound task.
        """
        # Find a Task for this TaskSpec.
        my_task = self._find_my_task(task_spec)
        state = (TaskState.READY
                 if my_task.has_state(TaskState.COMPLETED)
                 else TaskState.FUTURE)
        for output in self.outputs:
            child = my_task._add_child(output, state)
            child.triggered = True
            output._predict(child, mask=TaskState.FUTURE | TaskState.READY | TaskState.PREDICTED_MASK)

    def _get_predicted_outputs(self, my_task):
        # One copy of the outputs per repetition.
        repetitions = int(valueof(my_task, self.times, 1))
        return self.outputs * repetitions

    def _predict_hook(self, my_task):
        outputs = self._get_predicted_outputs(my_task)
        state = (TaskState.FUTURE
                 if my_task.has_state(TaskState.DEFINITE_MASK)
                 else TaskState.LIKELY)
        my_task._sync_children(outputs, state)

    def _run_hook(self, my_task):
        my_task._sync_children(self._get_predicted_outputs(my_task), TaskState.FUTURE)
        self._predict(my_task, mask=TaskState.FUTURE | TaskState.PREDICTED_MASK)
        return True

    def serialize(self, serializer):
        return serializer.serialize_multi_instance(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        return serializer.deserialize_multi_instance(wf_spec, s_state)
| 3,731 | Python | .py | 87 | 35.885057 | 101 | 0.676033 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
997 | __init__.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/__init__.py | # This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from .AcquireMutex import AcquireMutex
from .Cancel import Cancel
from .CancelTask import CancelTask
from .Choose import Choose
from .ExclusiveChoice import ExclusiveChoice
from .Execute import Execute
from .Gate import Gate
from .Join import Join
from .Merge import Merge
from .MultiChoice import MultiChoice
from .MultiInstance import MultiInstance
from .ReleaseMutex import ReleaseMutex
from .Simple import Simple
from .StartTask import StartTask
from .SubWorkflow import SubWorkflow
from .ThreadStart import ThreadStart
from .ThreadMerge import ThreadMerge
from .ThreadSplit import ThreadSplit
from .Transform import Transform
from .Trigger import Trigger
from .WorkflowSpec import WorkflowSpec | 1,470 | Python | .py | 37 | 38.72973 | 69 | 0.829728 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
998 | base.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/base.py | # Copyright (C) 2007 Samuel Abels, 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from abc import abstractmethod
from SpiffWorkflow.util.task import TaskState
from SpiffWorkflow.util.event import Event
from ..exceptions import WorkflowException
class TaskSpec(object):
"""
This class implements an abstract base type for all tasks.
Tasks provide the following signals:
- **entered**: called when the state changes to READY or WAITING, at a
time where spec data is not yet initialized.
- **reached**: called when the state changes to READY or WAITING, at a
time where spec data is already initialized using data_assign
and pre-assign.
- **ready**: called when the state changes to READY, at a time where
spec data is already initialized using data_assign and
pre-assign.
- **completed**: called when the state changes to COMPLETED, at a time
before the post-assign variables are assigned.
- **cancelled**: called when the state changes to CANCELLED, at a time
before the post-assign variables are assigned.
- **finished**: called when the state changes to COMPLETED or CANCELLED,
at the last possible time after the post-assign variables are
assigned and mutexes are released.
Event sequence is: entered -> reached -> ready -> completed -> finished
(cancelled may happen at any time)
The only events where implementing something other than state tracking
may be useful are the following:_
- Reached: You could mess with the pre-assign variables here, for
example. Other then that, there is probably no need in a real
application.
- Ready: This is where a task could implement custom code, for example
for triggering an external system. This is also the only event where a
return value has a meaning (returning non-True will mean that the
post-assign procedure is skipped.)
"""
def __init__(self, wf_spec, name, **kwargs):
    """
    Constructor.

    The difference between the assignment of a data value using
    the data argument versus pre_assign and post_assign is that
    changes made using data are task-local, i.e. they are
    not visible to other tasks.

    Similarly, "defines" are spec data fields that, once defined, can
    no longer be modified.

    :type  wf_spec: WorkflowSpec
    :param wf_spec: A reference to the workflow specification that owns it.
    :type  name: string
    :param name: A name for the task.
    :type  manual: bool
    :param manual: Whether this task requires a manual action to complete.
    :type  data: dict((str, object))
    :param data: name/value pairs
    :type  defines: dict((str, object))
    :param defines: name/value pairs
    :type  pre_assign: list((str, object))
    :param pre_assign: a list of name/value pairs
    :type  post_assign: list((str, object))
    :param post_assign: a list of name/value pairs
    """
    self._wf_spec = wf_spec
    self.name = str(name)
    self.description = kwargs.get('description', None)
    # Inputs/outputs are stored by task spec *name* and resolved lazily
    # against self._wf_spec.task_specs.
    self._inputs = kwargs.get('inputs', [])
    self._outputs = kwargs.get('outputs', [])
    self.manual = kwargs.get('manual', False)
    self.data = kwargs.get('data', {})
    self.defines = kwargs.get('defines', {})
    self.pre_assign = kwargs.get('pre_assign',[])
    self.post_assign = kwargs.get('post_assign', [])
    self.lookahead = 2  # Maximum number of MAYBE predictions.

    # Events.
    self.update_event = Event()
    self.ready_event = Event()
    self.completed_event = Event()
    self.error_event = Event()
    self.cancelled_event = Event()
    self.run_event = Event()

    # Register this spec with its owning workflow specification.
    self._wf_spec._add_notify(self)
    # Seed write-once "defines" into data; set_data() enforces immutability.
    self.data.update(self.defines)
@property
def inputs(self):
    """Input task specs, resolved from their stored names."""
    lookup = self._wf_spec.task_specs.get
    return [lookup(name) for name in self._inputs]

@inputs.setter
def inputs(self, task_specs):
    """Store inputs by name only; resolution happens on read."""
    self._inputs = [spec.name for spec in task_specs]

@property
def outputs(self):
    """Output task specs, resolved from their stored names."""
    lookup = self._wf_spec.task_specs.get
    return [lookup(name) for name in self._outputs]

@outputs.setter
def outputs(self, task_specs):
    """Store outputs by name only; resolution happens on read."""
    self._outputs = [spec.name for spec in task_specs]
def _connect_notify(self, taskspec):
"""
Called by the previous task to let us know that it exists.
:type taskspec: TaskSpec
:param taskspec: The task by which this method is executed.
"""
self._inputs.append(taskspec.name)
def ancestors(self):
    """Returns list of ancestor task specs based on inputs."""
    found = []

    def visit(spec):
        # Depth-first walk over predecessors; each spec is recorded
        # (and expanded) at most once.
        for predecessor in spec.inputs:
            if predecessor not in found:
                found.append(predecessor)
                visit(predecessor)

    visit(self)
    return found
def _get_activated_tasks(self, my_task, destination):
    """
    Returns the list of tasks that were activated in the previous
    call of execute(). Only returns tasks that point towards the
    destination task, i.e. those which have destination as a
    descendant.

    :type  my_task: Task
    :param my_task: The associated task in the task tree.
    :type  destination: Task
    :param destination: The destination task.
    """
    # Base implementation ignores `destination` and simply returns all
    # children; subclasses with branching semantics may filter further.
    return my_task.children
def _get_activated_threads(self, my_task):
    """
    Returns the list of threads that were activated in the previous
    call of execute().

    :type  my_task: Task
    :param my_task: The associated task in the task tree.
    """
    # Base implementation: every child counts as an activated thread.
    return my_task.children
def set_data(self, **kwargs):
    """
    Defines the given data field(s) using the given name/value pairs.

    :raises WorkflowException: if any given key is declared in
        ``defines`` (spec data marked write-once).
    """
    for key in kwargs:
        if key in self.defines:
            msg = "Spec data %s can not be modified" % key
            # BUG FIX: arguments were previously passed as
            # WorkflowException(self, msg), putting the task spec where
            # the message belongs; every other call site in this package
            # uses (msg, task_spec=self).
            raise WorkflowException(msg, task_spec=self)
    self.data.update(kwargs)
def get_data(self, name, default=None):
    """
    Returns the value of the data field with the given name, or the
    given default value if the data field is not defined.

    :type  name: string
    :param name: The name of the data field.
    :type  default: object
    :param default: Returned if the data field is not defined.
    """
    if name in self.data:
        return self.data[name]
    return default
def connect(self, taskspec):
    """
    Connect the *following* task to this one. In other words, the
    given task is added as an output task.

    :type  taskspec: TaskSpec
    :param taskspec: The new output task.
    """
    # Record the successor by name, then tell it about us so it can
    # register this spec among its inputs.
    successor_name = taskspec.name
    self._outputs.append(successor_name)
    taskspec._connect_notify(self)
def test(self):
    """
    Checks whether all required attributes are set. Throws an exception
    if an error was detected.

    :raises WorkflowException: if this spec has no connected inputs.
    """
    if len(self.inputs) < 1:
        # BUG FIX: arguments were previously passed as
        # WorkflowException(self, '...'), putting the task spec where the
        # message belongs; every other call site in this package uses
        # (message, task_spec=self).
        raise WorkflowException('No input task connected.', task_spec=self)
def _predict(self, my_task, seen=None, looked_ahead=0, mask=TaskState.PREDICTED_MASK):
    """
    Updates the branch such that all possible future routes are added.

    Should NOT be overwritten!  Instead, overwrite _predict_hook().

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    :type seen: list[TaskSpec]
    :param seen: A list of already visited task specs.
    :type looked_ahead: integer
    :param looked_ahead: The depth of the predicted path so far.
    """
    seen = [] if seen is None else seen
    if my_task.has_state(mask):
        self._predict_hook(my_task)
    if my_task.has_state(TaskState.PREDICTED_MASK):
        # Remember this spec so cyclic graphs cannot recurse forever.
        seen.append(self)
    # Definite branches are always followed; purely predicted ones only
    # up to the configured lookahead depth.
    may_continue = (my_task.has_state(TaskState.DEFINITE_MASK)
                    or looked_ahead + 1 < self.lookahead)
    for child in my_task.children:
        if child.has_state(mask) and child not in seen and may_continue:
            # Each branch gets its own copy of `seen`, keeping siblings
            # independent of one another.
            child.task_spec._predict(child, seen[:], looked_ahead + 1, mask)
def _predict_hook(self, my_task):
    # A task with a definite state gets FUTURE children; a speculative
    # one simply propagates its own state down to the children.
    child_state = (TaskState.FUTURE
                   if my_task.has_state(TaskState.DEFINITE_MASK)
                   else my_task.state)
    my_task._sync_children(self.outputs, child_state)
def _update(self, my_task):
    """
    Called whenever any event happens that may affect the
    state of this task in the workflow. For example, if a predecessor
    completes it makes sure to call this method so we can react.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    """
    # Speculative (predicted) tasks are re-predicted first so the branch
    # reflects the latest upstream state.
    if my_task.has_state(TaskState.PREDICTED_MASK):
        self._predict(my_task)
    # _update_hook() decides whether the task may enter the READY state.
    if self._update_hook(my_task):
        my_task._ready()
def _update_hook(self, my_task):
    """
    This method should decide whether the task should run now or need to wait.

    Tasks can also optionally choose not to inherit data.

    Returning True will cause the task to go into READY.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    :rtype: bool
    :returns: True; the base implementation is always ready.
    """
    # Pull data down from the parent task before notifying listeners,
    # so subscribers observe the inherited data.
    my_task._inherit_data()
    self.update_event.emit(my_task.workflow, my_task)
    return True
def _on_ready(self, my_task):
"""
Return True on success, False otherwise.
:type my_task: Task
:param my_task: The associated task in the task tree.
"""
self.test()
# Assign variables, if so requested.
for assignment in self.pre_assign:
assignment.assign(my_task, my_task)
# Run task-specific code.
self._on_ready_hook(my_task)
def _on_ready_hook(self, my_task):
    """
    A hook into _on_ready() that does the task specific work.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    """
    # The base implementation only notifies subscribers of the event.
    self.ready_event.emit(my_task.workflow, my_task)
def _run(self, my_task):
"""
Run the task.
:type my_task: Task
:param my_task: The associated task in the task tree.
:rtype: boolean or None
:returns: the value returned by the task spec's run method.
"""
# I'm not sure I like setting the state here. I'd like to handle it in `task` like
# the other transitions, and allow task specific error handling behavior.
# Having a task return a boolean indicating success (or None if it should just wait
# because the task is running) works well for scripts, but not for other types
# This is the easiest way of dealing with all other errors.
try:
result = self._run_hook(my_task)
# Run user code, if any.
for assignment in self.post_assign:
assignment.assign(my_task, my_task)
return result
except Exception as exc:
my_task.error()
raise exc
def _run_hook(self, my_task):
    """
    A hook into _run() that does the task specific work.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    :rtype: bool
    :returns: True; the base implementation always succeeds.
    """
    # The base implementation only notifies subscribers of the event.
    self.run_event.emit(my_task.workflow, my_task)
    return True
def _on_cancel(self, my_task):
    """
    May be called by another task to cancel the operation before it was
    completed.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    """
    # The base implementation only notifies subscribers of the event.
    self.cancelled_event.emit(my_task.workflow, my_task)
def _on_trigger(self, my_task):
    """
    May be called by another task to trigger a task-specific
    event.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    :rtype: boolean
    :returns: True on success, False otherwise.
    :raises NotImplementedError: always, in this base implementation;
        subclasses that support triggering must override this method.
    """
    raise NotImplementedError("Trigger not supported by this task.")
def _on_complete(self, my_task):
    """
    Runs when the task completes. Should not be overwritten;
    overwrite _on_complete_hook() instead.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    """
    self._on_complete_hook(my_task)
    # Give every unfinished child a chance to react to the completion.
    for child in my_task.children:
        if not child.has_state(TaskState.FINISHED_MASK):
            child.task_spec._update(child)
    # Let the workflow itself respond (e.g. detect overall completion).
    my_task.workflow._task_completed_notify(my_task)
def _on_complete_hook(self, my_task):
    """
    A hook into _on_complete() that does the task specific work.

    :type my_task: Task
    :param my_task: The associated task in the task tree.
    """
    # The base implementation only notifies subscribers of the event.
    self.completed_event.emit(my_task.workflow, my_task)
def _on_error(self, my_task):
    # Delegates to the overridable hook so subclasses can customize
    # error handling without changing the entry point.
    self._on_error_hook(my_task)
def _on_error_hook(self, my_task):
    """Can be overridden for task specific error handling"""
    # The base implementation only notifies subscribers of the event.
    self.error_event.emit(my_task.workflow, my_task)
@abstractmethod
def serialize(self, serializer, **kwargs):
    """
    Serializes the instance using the provided serializer.

    .. note::

        The events of a TaskSpec are not serialized. If you
        use them, make sure to re-connect them once the spec is
        deserialized.

    :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
    :param serializer: The serializer to use.
    :type kwargs: dict
    :param kwargs: Passed to the serializer.
    :rtype: object
    :returns: The serialized object.
    """
    # The fully-qualified class name lets deserialization find the
    # right TaskSpec subclass again.
    class_name = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
    state = {'class': class_name}
    state.update(
        name=self.name,
        description=self.description,
        inputs=self._inputs,
        outputs=self._outputs,
        manual=self.manual,
        data=self.data,
        defines=self.defines,
        pre_assign=self.pre_assign,
        post_assign=self.post_assign,
        lookahead=self.lookahead,
    )
    return state
@classmethod
def deserialize(cls, serializer, wf_spec, s_state, **kwargs):
    """
    Deserializes the instance using the provided serializer.

    .. note::

        The events of a TaskSpec are not serialized. If you
        use them, make sure to re-connect them once the spec is
        deserialized.

    :type serializer: :class:`SpiffWorkflow.serializer.base.Serializer`
    :param serializer: The serializer to use.
    :type wf_spec: :class:`SpiffWorkflow.spec.WorkflowSpec`
    :param wf_spec: An instance of the WorkflowSpec.
    :type s_state: object
    :param s_state: The serialized task specification object.
    :type kwargs: dict
    :param kwargs: Passed to the serializer.
    :rtype: TaskSpec
    :returns: The task specification instance.
    """
    spec = cls(wf_spec, s_state.get('name'))
    # Restore the plain attributes straight from the serialized mapping;
    # missing keys become None, mirroring dict.get defaults.
    for attr, key in (
        ('name', 'name'),
        ('description', 'description'),
        ('_inputs', 'inputs'),
        ('_outputs', 'outputs'),
        ('manual', 'manual'),
        ('data', 'data'),
        ('defines', 'defines'),
        ('pre_assign', 'pre_assign'),
        ('post_assign', 'post_assign'),
        ('lookahead', 'lookahead'),
    ):
        setattr(spec, attr, s_state.get(key))
    return spec
| 16,717 | Python | .py | 385 | 34.420779 | 100 | 0.627461 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
999 | Simple.py | sartography_SpiffWorkflow/SpiffWorkflow/specs/Simple.py | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from .base import TaskSpec
class Simple(TaskSpec):
    """
    This class implements a task with one or more inputs and
    any number of outputs.

    If more than one input is connected, the task performs an implicit
    multi merge.

    If more than one output is connected, the task performs an implicit
    parallel split.
    """

    def serialize(self, serializer):
        """Serialize this task spec via the given serializer."""
        return serializer.serialize_simple(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        """Restore a Simple task spec from its serialized state.

        The first parameter is named ``cls`` (it receives the class, not
        an instance); the original code misleadingly called it ``self``.
        """
        return serializer.deserialize_simple(wf_spec, s_state)
| 1,370 | Python | .py | 33 | 38.545455 | 71 | 0.761261 | sartography/SpiffWorkflow | 1,663 | 310 | 6 | LGPL-3.0 | 9/5/2024, 5:08:37 PM (Europe/Amsterdam) |
Subsets and Splits