Dataset schema (one row per chunk of an updated file):

| column | type | range / values |
| --- | --- | --- |
| status | stringclasses | 1 value |
| repo_name | stringclasses | 31 values |
| repo_url | stringclasses | 31 values |
| issue_id | int64 | 1 to 104k |
| title | stringlengths | 4 to 233 |
| body | stringlengths | 0 to 186k |
| issue_url | stringlengths | 38 to 56 |
| pull_url | stringlengths | 37 to 54 |
| before_fix_sha | stringlengths | 40 to 40 |
| after_fix_sha | stringlengths | 40 to 40 |
| report_datetime | timestamp[us, tz=UTC] | |
| language | stringclasses | 5 values |
| commit_datetime | timestamp[us, tz=UTC] | |
| updated_file | stringlengths | 7 to 188 |
| chunk_content | stringlengths | 1 to 1.03M |
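Read as a typed row, the schema above might map to a Java record like the following sketch. The field names come from the schema; the Java type choices (for example `Instant` for the UTC timestamps) are assumptions, since the dump only gives Arrow-style column types.

```java
import java.time.Instant;

// Hypothetical row type for this dataset; names follow the schema above,
// Java types are assumed from the Arrow-style column types.
public record BugFixChunkRow(
        String status,          // stringclasses, 1 value ("closed")
        String repoName,
        String repoUrl,
        long issueId,           // int64
        String title,
        String body,            // issue text, GitHub-flavored markdown
        String issueUrl,
        String pullUrl,
        String beforeFixSha,    // always 40 chars, a full git SHA
        String afterFixSha,     // always 40 chars
        Instant reportDatetime, // timestamp[us, tz=UTC]
        String language,        // stringclasses, 5 values
        Instant commitDatetime, // timestamp[us, tz=UTC]
        String updatedFile,     // repo-relative path
        String chunkContent     // one slice of the updated file
) {}
```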
Record (all chunks below share this metadata):

- status: closed
- repo_name: apache/dolphinscheduler
- repo_url: https://github.com/apache/dolphinscheduler
- issue_id: 7203
- title: [Bug] [Process definition] There is a problem with the location of the import process node.
- issue_url: https://github.com/apache/dolphinscheduler/issues/7203
- pull_url: https://github.com/apache/dolphinscheduler/pull/7811
- before_fix_sha: ab89e4335e4a645f90a9c4a0c05a8efabb5e21c4
- after_fix_sha: f0ad65cb143f81f8c94e864ac6b69fbe2151c0ca
- report_datetime: 2021-12-06T06:50:22Z
- language: java
- commit_datetime: 2022-01-05T08:28:55Z
- updated_file: dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java

body:

> ### Search before asking
>
> - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.
>
> ### What happened
>
> The import process does not render the nodes in the workflow according to the coordinate positions in the file.
>
> ![image](https://user-images.githubusercontent.com/19239641/144801054-e7ff9f02-dc71-4e34-95c3-0f331c3c2660.png)
>
> ### What you expected to happen
>
> Node positions should be rendered based on the coordinates stored in the file.
>
> ### How to reproduce
>
> Upload the workflow file and click on the workflow details.
>
> ### Anything else
>
> _No response_
>
> ### Version
>
> dev
>
> ### Are you willing to submit PR?
>
> - [ ] Yes I am willing to submit a PR!
>
> ### Code of Conduct
>
> - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)

chunk_content: the consecutive slices of updated_file reproduced below.
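One note before the chunks: the coordinates this issue is about live in a process definition's `locations` field, a JSON array stored as a string, and the issue reports that imported workflows were not rendered at those coordinates. A minimal sketch of what one entry might look like, built with plain Jackson; the `taskCode` key is visible in the service code below, while the `x`/`y` key names are an assumption for illustration.

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class LocationsSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // One entry per task node; "taskCode" appears in the service code below,
        // the coordinate keys ("x", "y") are assumptions for illustration.
        ArrayNode locations = mapper.createArrayNode();
        ObjectNode node = mapper.createObjectNode();
        node.put("taskCode", 123456789L);
        node.put("x", 120);
        node.put("y", 260);
        locations.add(node);
        // The definition stores this array as a plain string; a renderer that
        // honors it should place task 123456789 at (120, 260) on the canvas.
        System.out.println(mapper.writeValueAsString(locations));
        // [{"taskCode":123456789,"x":120,"y":260}]
    }
}
```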
```java
 *
 * @param loginUser loginUser
 * @param projectCode project code
 * @param codes define codes
 * @return task node list
 */
@Override
public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
    List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
    if (CollectionUtils.isEmpty(processDefinitionList)) {
        logger.info("process definition not exists");
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes);
        return result;
    }
    HashMap<Long, Project> userProjects = new HashMap<>(Constants.DEFAULT_HASH_MAP_SIZE);
    projectMapper.queryProjectCreatedAndAuthorizedByUserId(loginUser.getId())
            .forEach(userProject -> userProjects.put(userProject.getCode(), userProject));
    List<ProcessDefinition> processDefinitionListInProject = processDefinitionList.stream()
            .filter(o -> userProjects.containsKey(o.getProjectCode())).collect(Collectors.toList());
    if (CollectionUtils.isEmpty(processDefinitionListInProject)) {
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes);
        return result;
```
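The chunk above turns the comma-separated `codes` request parameter into a `Set<Long>` before querying. A standalone sketch of that step, assuming a plain "," in place of `Constants.COMMA`; as in the real code, a malformed code surfaces as a `NumberFormatException`.

```java
import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;

public class DefineCodeParser {
    // Mirrors the split-and-parse step in getNodeListMapByDefinitionCodes.
    static Set<Long> parseDefineCodes(String codes) {
        return Arrays.stream(codes.split(","))
                .map(String::trim)
                .map(Long::parseLong) // throws NumberFormatException on bad input
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        System.out.println(parseDefineCodes("101,102")); // [101, 102]
    }
}
```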
```java
    }
    Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>();
    for (ProcessDefinition processDefinition : processDefinitionListInProject) {
        DagData dagData = processService.genDagData(processDefinition);
        taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList());
    }
    result.put(Constants.DATA_LIST, taskNodeMap);
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * query process definition all by project code
 *
 * @param loginUser loginUser
 * @param projectCode project code
 * @return process definitions in the project
 */
@Override
public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode);
    List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList());
    result.put(Constants.DATA_LIST, dagDataList);
    putMsg(result, Status.SUCCESS);
    return result;
```
```java
}

/**
 * Encapsulates the TreeView structure
 *
 * @param projectCode project code
 * @param code process definition code
 * @param limit limit
 * @return tree view json data
 */
@Override
public Map<String, Object> viewTree(long projectCode, long code, Integer limit) {
    Map<String, Object> result = new HashMap<>();
    ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
    if (null == processDefinition || projectCode != processDefinition.getProjectCode()) {
        logger.info("process define not exists");
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        return result;
    }
    DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition);
    Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>();
    Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>();
    List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit);
    processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())));
    List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()));
    Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream()
            .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
    if (limit > processInstanceList.size()) {
```
```java
        limit = processInstanceList.size();
    }
    TreeViewDto parentTreeViewDto = new TreeViewDto();
    parentTreeViewDto.setName("DAG");
    parentTreeViewDto.setType("");
    parentTreeViewDto.setCode(0L);
    for (int i = limit - 1; i >= 0; i--) {
        ProcessInstance processInstance = processInstanceList.get(i);
        Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime();
        parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(),
                "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(),
                DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
    }
    List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
    parentTreeViewDtoList.add(parentTreeViewDto);
    for (String startNode : dag.getBeginNode()) {
        runningNodeMap.put(startNode, parentTreeViewDtoList);
    }
    while (Stopper.isRunning()) {
        Set<String> postNodeList;
        Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
        while (iter.hasNext()) {
            Map.Entry<String, List<TreeViewDto>> en = iter.next();
            String nodeCode = en.getKey();
            parentTreeViewDtoList = en.getValue();
            TreeViewDto treeViewDto = new TreeViewDto();
            TaskNode taskNode = dag.getNode(nodeCode);
            treeViewDto.setType(taskNode.getType());
```
```java
            treeViewDto.setCode(taskNode.getCode());
            treeViewDto.setName(taskNode.getName());
            for (int i = limit - 1; i >= 0; i--) {
                ProcessInstance processInstance = processInstanceList.get(i);
                TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndCode(processInstance.getId(), Long.parseLong(nodeCode));
                if (taskInstance == null) {
                    treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null"));
                } else {
                    Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
                    Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
                    long subProcessCode = 0L;
                    if (taskInstance.isSubProcess()) {
                        TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode());
                        subProcessCode = Integer.parseInt(JSONUtils.parseObject(
                                taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_CODE).asText());
                    }
                    treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(),
                            taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(),
                            taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessCode));
                }
            }
            for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
                pTreeViewDto.getChildren().add(treeViewDto);
            }
            postNodeList = dag.getSubsequentNodes(nodeCode);
            if (CollectionUtils.isNotEmpty(postNodeList)) {
                for (String nextNodeCode : postNodeList) {
                    List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeCode);
```
```java
                    if (CollectionUtils.isEmpty(treeViewDtoList)) {
                        treeViewDtoList = new ArrayList<>();
                    }
                    treeViewDtoList.add(treeViewDto);
                    waitingRunningNodeMap.put(nextNodeCode, treeViewDtoList);
                }
            }
            runningNodeMap.remove(nodeCode);
        }
        if (waitingRunningNodeMap.size() == 0) {
            break;
        } else {
            runningNodeMap.putAll(waitingRunningNodeMap);
            waitingRunningNodeMap.clear();
        }
    }
    result.put(Constants.DATA_LIST, parentTreeViewDto);
    result.put(Constants.STATUS, Status.SUCCESS);
    result.put(Constants.MSG, Status.SUCCESS.getMsg());
    return result;
}

/**
 * whether the graph has a ring
 *
 * @param taskNodeResponseList task node response list
 * @return if graph has cycle flag
 */
private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
    DAG<String, TaskNode, String> graph = new DAG<>();
```
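The `viewTree` loop that ends in the chunk above is a level-order walk of the DAG: `runningNodeMap` holds the current frontier, successors accumulate in `waitingRunningNodeMap`, and the two swap each round until no successors remain. A minimal sketch of the same pattern over a plain adjacency map; the node names and `String` node type are illustrative, since the real code keys by task code and carries `TreeViewDto` lists.

```java
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class LevelOrderSketch {
    public static void main(String[] args) {
        // a -> b, a -> c, b -> d, c -> d
        Map<String, List<String>> successors = Map.of(
                "a", List.of("b", "c"),
                "b", List.of("d"),
                "c", List.of("d"),
                "d", List.of());
        Set<String> running = new HashSet<>(List.of("a")); // begin nodes
        Set<String> waiting = new HashSet<>();
        while (!running.isEmpty()) {
            for (String node : running) {
                System.out.println("visit " + node);
                waiting.addAll(successors.get(node)); // queue the next level
            }
            running = waiting;         // swap frontiers, like runningNodeMap.putAll(...)
            waiting = new HashSet<>(); // ... followed by waitingRunningNodeMap.clear()
        }
    }
}
```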
```java
    for (TaskNode taskNodeResponse : taskNodeResponseList) {
        graph.addNode(Long.toString(taskNodeResponse.getCode()), taskNodeResponse);
    }
    for (TaskNode taskNodeResponse : taskNodeResponseList) {
        List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class);
        if (CollectionUtils.isNotEmpty(preTasks)) {
            for (String preTask : preTasks) {
                if (!graph.addEdge(preTask, Long.toString(taskNodeResponse.getCode()))) {
                    return true;
                }
            }
        }
    }
    return graph.hasCycle();
}

/**
 * batch copy process definition
 *
 * @param loginUser loginUser
 * @param projectCode projectCode
 * @param codes processDefinitionCodes
 * @param targetProjectCode targetProjectCode
 */
@Override
public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) {
    Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
```
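`graphHasCycle`, completed in the chunk above, leans on the project's `DAG` class: `addEdge` refuses an edge that would close a ring, and `hasCycle` double-checks the result. For readers without that class, here is a self-contained sketch with the same contract using Kahn's algorithm; it mirrors the guarantee, not the project's implementation.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CycleCheckSketch {
    // Kahn's algorithm: a directed graph has a cycle iff a topological
    // sort cannot consume every node.
    static boolean hasCycle(Map<String, List<String>> adj) {
        Map<String, Integer> inDegree = new HashMap<>();
        adj.keySet().forEach(n -> inDegree.putIfAbsent(n, 0));
        adj.values().forEach(targets ->
                targets.forEach(t -> inDegree.merge(t, 1, Integer::sum)));
        Deque<String> ready = new ArrayDeque<>();
        inDegree.forEach((n, d) -> { if (d == 0) ready.add(n); });
        int seen = 0;
        while (!ready.isEmpty()) {
            String n = ready.poll();
            seen++;
            for (String t : adj.getOrDefault(n, List.of())) {
                if (inDegree.merge(t, -1, Integer::sum) == 0) {
                    ready.add(t);
                }
            }
        }
        return seen != inDegree.size();
    }

    public static void main(String[] args) {
        System.out.println(hasCycle(Map.of("a", List.of("b"), "b", List.of("a")))); // true
        System.out.println(hasCycle(Map.of("a", List.of("b"), "b", List.of())));    // false
    }
}
```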
```java
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    List<String> failedProcessList = new ArrayList<>();
    doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true);
    if (result.get(Constants.STATUS) == Status.NOT_SUPPORT_COPY_TASK_TYPE) {
        return result;
    }
    checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true);
    return result;
}

/**
 * batch move process definition
 * Will be deleted
 * @param loginUser loginUser
 * @param projectCode projectCode
 * @param codes processDefinitionCodes
 * @param targetProjectCode targetProjectCode
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) {
    Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    if (projectCode == targetProjectCode) {
```
```java
        return result;
    }
    List<String> failedProcessList = new ArrayList<>();
    doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false);
    checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false);
    return result;
}

private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    if (org.apache.commons.lang.StringUtils.isEmpty(processDefinitionCodes)) {
        putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes);
        return result;
    }
    if (projectCode != targetProjectCode) {
        Project targetProject = projectMapper.queryByCode(targetProjectCode);
        Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode);
        if (targetResult.get(Constants.STATUS) != Status.SUCCESS) {
            return targetResult;
        }
    }
    return result;
```
```java
}

private void doBatchOperateProcessDefinition(User loginUser,
                                             long targetProjectCode,
                                             List<String> failedProcessList,
                                             String processDefinitionCodes,
                                             Map<String, Object> result,
                                             boolean isCopy) {
    Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet());
    List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes);
    Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet());
    Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet());
    diffCode.forEach(code -> failedProcessList.add(code + "[null]"));
    for (ProcessDefinition processDefinition : processDefinitionList) {
        List<ProcessTaskRelation> processTaskRelations =
                processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
        List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList());
        processDefinition.setProjectCode(targetProjectCode);
        if (isCopy) {
            List<TaskDefinitionLog> taskDefinitionLogs = processService.genTaskDefineList(processTaskRelations);
            Map<Long, Long> taskCodeMap = new HashMap<>();
            for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
                if (TaskType.CONDITIONS.getDesc().equals(taskDefinitionLog.getTaskType())
                        || TaskType.SWITCH.getDesc().equals(taskDefinitionLog.getTaskType())
                        || TaskType.SUB_PROCESS.getDesc().equals(taskDefinitionLog.getTaskType())
                        || TaskType.DEPENDENT.getDesc().equals(taskDefinitionLog.getTaskType())) {
                    putMsg(result, Status.NOT_SUPPORT_COPY_TASK_TYPE, taskDefinitionLog.getTaskType());
                    return;
                }
                try {
```
```java
                    long taskCode = CodeGenerateUtils.getInstance().genCode();
                    taskCodeMap.put(taskDefinitionLog.getCode(), taskCode);
                    taskDefinitionLog.setCode(taskCode);
                } catch (CodeGenerateException e) {
                    putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
                    throw new ServiceException(Status.INTERNAL_SERVER_ERROR_ARGS);
                }
                taskDefinitionLog.setProjectCode(targetProjectCode);
                taskDefinitionLog.setVersion(0);
                taskDefinitionLog.setName(taskDefinitionLog.getName() + "_copy_" + DateUtils.getCurrentTimeStamp());
            }
            for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
                if (processTaskRelationLog.getPreTaskCode() > 0) {
                    processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode()));
                }
                if (processTaskRelationLog.getPostTaskCode() > 0) {
                    processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode()));
                }
            }
            try {
                processDefinition.setCode(CodeGenerateUtils.getInstance().genCode());
            } catch (CodeGenerateException e) {
                putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
                throw new ServiceException(Status.INTERNAL_SERVER_ERROR_ARGS);
            }
            processDefinition.setId(0);
            processDefinition.setUserId(loginUser.getId());
            processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp());
            if (StringUtils.isNotBlank(processDefinition.getLocations())) {
                ArrayNode jsonNodes = JSONUtils.parseArray(processDefinition.getLocations());
```
```java
                for (int i = 0; i < jsonNodes.size(); i++) {
                    ObjectNode node = (ObjectNode) jsonNodes.path(i);
                    node.put("taskCode", taskCodeMap.get(node.get("taskCode").asLong()));
                    jsonNodes.set(i, node);
                }
                processDefinition.setLocations(JSONUtils.toJsonString(jsonNodes));
            }
            try {
                result.putAll(createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs));
            } catch (Exception e) {
                putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR);
                throw new ServiceException(Status.COPY_PROCESS_DEFINITION_ERROR);
            }
        } else {
            try {
                result.putAll(updateDagDefine(loginUser, taskRelationList, processDefinition, null, Lists.newArrayList()));
            } catch (Exception e) {
                putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR);
                throw new ServiceException(Status.MOVE_PROCESS_DEFINITION_ERROR);
            }
        }
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]");
        }
    }
}

/**
 * switch the defined process definition version
 *
 * @param loginUser login user
```
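The copy branch finished in the chunk above touches the field at the heart of this issue: it parses the `locations` string, rewrites each entry's `taskCode` to the freshly generated code, and stores the array back, so copied nodes keep their coordinates. A standalone sketch of that remapping with plain Jackson, where the real code goes through the project's `JSONUtils` wrapper:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.Map;

public class LocationRemapSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String locations = "[{\"taskCode\":111,\"x\":120,\"y\":260}]";
        Map<Long, Long> taskCodeMap = Map.of(111L, 999L); // old code -> new code
        ArrayNode jsonNodes = (ArrayNode) mapper.readTree(locations);
        for (int i = 0; i < jsonNodes.size(); i++) {
            ObjectNode node = (ObjectNode) jsonNodes.path(i);
            node.put("taskCode", taskCodeMap.get(node.get("taskCode").asLong()));
            jsonNodes.set(i, node);
        }
        // Coordinates survive; only the task code changes.
        System.out.println(mapper.writeValueAsString(jsonNodes));
        // [{"taskCode":999,"x":120,"y":260}]
    }
}
```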
```java
 * @param projectCode project code
 * @param code process definition code
 * @param version the version user want to switch
 * @return switch process definition version result code
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
    if (Objects.isNull(processDefinition) || projectCode != processDefinition.getProjectCode()) {
        putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code);
        return result;
    }
    ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version);
    if (Objects.isNull(processDefinitionLog)) {
        putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version);
        return result;
    }
    int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog);
    if (switchVersion <= 0) {
        putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
        throw new ServiceException(Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
    }
    putMsg(result, Status.SUCCESS);
```
```java
    return result;
}

/**
 * check batch operate result
 *
 * @param srcProjectCode srcProjectCode
 * @param targetProjectCode targetProjectCode
 * @param result result
 * @param failedProcessList failedProcessList
 * @param isCopy isCopy
 */
private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode,
                                     Map<String, Object> result, List<String> failedProcessList, boolean isCopy) {
    if (!failedProcessList.isEmpty()) {
        if (isCopy) {
            putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
        } else {
            putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
        }
    } else {
        putMsg(result, Status.SUCCESS);
    }
}

/**
 * query the pagination versions info by one certain process definition code
 *
 * @param loginUser login user info to check auth
 * @param projectCode project code
 * @param pageNo page number
 * @param pageSize page size
```
```java
 * @param code process definition code
 * @return the pagination process definition versions info of the certain process definition
 */
@Override
public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) {
    Result result = new Result();
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    Status resultStatus = (Status) checkResult.get(Constants.STATUS);
    if (resultStatus != Status.SUCCESS) {
        putMsg(result, resultStatus);
        return result;
    }
    PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize);
    Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize);
    IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code, projectCode);
    List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords();
    pageInfo.setTotalList(processDefinitionLogs);
    pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal());
    result.setData(pageInfo);
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * delete one certain process definition by version number and process definition code
 *
 * @param loginUser login user info to check auth
 * @param projectCode project code
 * @param code process definition code
```
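`queryProcessDefinitionVersions` in the chunk above pages through version logs with MyBatis-Plus: a `Page` goes into the mapper and an `IPage` comes back with records and total filled in by the paging interceptor. A small sketch of that handoff shape, assuming the MyBatis-Plus dependency and filling the page by hand since no database is wired up here:

```java
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import java.util.List;

public class PagingSketch {
    public static void main(String[] args) {
        // Normally the mapper method receives the Page as its first argument
        // and the interceptor populates records and total from the query.
        Page<String> page = new Page<>(1, 10); // pageNo, pageSize
        page.setRecords(List.of("v1", "v2"));
        page.setTotal(2);
        IPage<String> result = page;
        System.out.println(result.getRecords() + " of " + result.getTotal());
    }
}
```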
```java
 * @param version version number
 * @return delete result code
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
    if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
    } else {
        if (processDefinition.getVersion() == version) {
            putMsg(result, Status.MAIN_TABLE_USING_VERSION);
            return result;
        }
        int deleteLog = processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version);
        int deleteRelationLog = processTaskRelationLogMapper.deleteByCode(processDefinition.getCode(), processDefinition.getVersion());
        if (deleteLog == 0 || deleteRelationLog == 0) {
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
        }
        putMsg(result, Status.SUCCESS);
    }
    return result;
}
```
```java
/**
 * create empty process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param description description
 * @param globalParams globalParams
 * @param timeout timeout
 * @param tenantCode tenantCode
 * @param scheduleJson scheduleJson
 * @return process definition code
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> createEmptyProcessDefinition(User loginUser,
                                                        long projectCode,
                                                        String name,
                                                        String description,
                                                        String globalParams,
                                                        int timeout,
                                                        String tenantCode,
                                                        String scheduleJson,
                                                        ProcessExecutionTypeEnum executionType) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
```
```java
    ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
    if (definition != null) {
        putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
        return result;
    }
    int tenantId = -1;
    if (!Constants.DEFAULT.equals(tenantCode)) {
        Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
        if (tenant == null) {
            putMsg(result, Status.TENANT_NOT_EXIST);
            return result;
        }
        tenantId = tenant.getId();
    }
    long processDefinitionCode;
    try {
        processDefinitionCode = CodeGenerateUtils.getInstance().genCode();
    } catch (CodeGenerateException e) {
        putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
        return result;
    }
    ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description,
            globalParams, "", timeout, loginUser.getId(), tenantId);
    processDefinition.setExecutionType(executionType);
    result = createEmptyDagDefine(loginUser, processDefinition);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    if (StringUtils.isBlank(scheduleJson)) {
```
```java
        return result;
    }
    Map<String, Object> scheduleResult = createDagSchedule(loginUser, processDefinition, scheduleJson);
    if (scheduleResult.get(Constants.STATUS) != Status.SUCCESS) {
        Status scheduleResultStatus = (Status) scheduleResult.get(Constants.STATUS);
        putMsg(result, scheduleResultStatus);
        throw new ServiceException(scheduleResultStatus);
    }
    return result;
}

private Map<String, Object> createEmptyDagDefine(User loginUser, ProcessDefinition processDefinition) {
    Map<String, Object> result = new HashMap<>();
    int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
    if (insertVersion == 0) {
        putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
        throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR);
    }
    putMsg(result, Status.SUCCESS);
    result.put(Constants.DATA_LIST, processDefinition);
    return result;
}

private Map<String, Object> createDagSchedule(User loginUser, ProcessDefinition processDefinition, String scheduleJson) {
    Map<String, Object> result = new HashMap<>();
    Schedule scheduleObj = JSONUtils.parseObject(scheduleJson, Schedule.class);
    if (scheduleObj == null) {
        putMsg(result, Status.DATA_IS_NOT_VALID, scheduleJson);
        throw new ServiceException(Status.DATA_IS_NOT_VALID);
    }
    Date now = new Date();
```
```java
    scheduleObj.setProcessDefinitionCode(processDefinition.getCode());
    if (DateUtils.differSec(scheduleObj.getStartTime(), scheduleObj.getEndTime()) == 0) {
        logger.warn("The start time must not be the same as the end");
        putMsg(result, Status.SCHEDULE_START_TIME_END_TIME_SAME);
        return result;
    }
    if (!org.quartz.CronExpression.isValidExpression(scheduleObj.getCrontab())) {
        logger.error("{} verify failure", scheduleObj.getCrontab());
        putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, scheduleObj.getCrontab());
        return result;
    }
    scheduleObj.setWarningType(scheduleObj.getWarningType() == null ? WarningType.NONE : scheduleObj.getWarningType());
    scheduleObj.setWarningGroupId(scheduleObj.getWarningGroupId() == 0 ? 1 : scheduleObj.getWarningGroupId());
    scheduleObj.setFailureStrategy(scheduleObj.getFailureStrategy() == null ? FailureStrategy.CONTINUE : scheduleObj.getFailureStrategy());
    scheduleObj.setCreateTime(now);
    scheduleObj.setUpdateTime(now);
    scheduleObj.setUserId(loginUser.getId());
    scheduleObj.setReleaseState(ReleaseState.OFFLINE);
    scheduleObj.setProcessInstancePriority(scheduleObj.getProcessInstancePriority() == null ? Priority.MEDIUM : scheduleObj.getProcessInstancePriority());
    scheduleObj.setWorkerGroup(scheduleObj.getWorkerGroup() == null ? "default" : scheduleObj.getWorkerGroup());
    scheduleObj.setEnvironmentCode(scheduleObj.getEnvironmentCode() == null ? -1 : scheduleObj.getEnvironmentCode());
    scheduleMapper.insert(scheduleObj);
    putMsg(result, Status.SUCCESS);
    result.put("scheduleId", scheduleObj.getId());
    return result;
}

/**
 * update process definition basic info
 *
 * @param loginUser login user
```
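`createDagSchedule` in the chunk above rejects any crontab that Quartz cannot parse. A tiny sketch of that check, assuming the quartz dependency; Quartz cron puts seconds first and requires `?` in one of the day fields.

```java
import org.quartz.CronExpression;

public class CronCheckSketch {
    public static void main(String[] args) {
        // Valid: every day at 02:00:00 (sec min hour day-of-month month day-of-week).
        System.out.println(CronExpression.isValidExpression("0 0 2 * * ?")); // true
        // Invalid for Quartz: only five fields, seconds are missing.
        System.out.println(CronExpression.isValidExpression("0 0 2 * *"));   // false
    }
}
```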
```java
 * @param projectCode project code
 * @param name process definition name
 * @param code process definition code
 * @param description description
 * @param globalParams globalParams
 * @param timeout timeout
 * @param tenantCode tenantCode
 * @param scheduleJson scheduleJson
 * @param executionType executionType
 * @return update result code
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> updateProcessDefinitionBasicInfo(User loginUser,
                                                            long projectCode,
                                                            String name,
                                                            long code,
                                                            String description,
                                                            String globalParams,
                                                            int timeout,
                                                            String tenantCode,
                                                            String scheduleJson,
                                                            ProcessExecutionTypeEnum executionType) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    int tenantId = -1;
```
```java
    if (!Constants.DEFAULT.equals(tenantCode)) {
        Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
        if (tenant == null) {
            putMsg(result, Status.TENANT_NOT_EXIST);
            return result;
        }
        tenantId = tenant.getId();
    }
    ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
    if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        return result;
    }
    if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
        putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
        return result;
    }
    if (!name.equals(processDefinition.getName())) {
        ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
        if (definition != null) {
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
            return result;
        }
    }
    ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class);
    processDefinition.set(projectCode, name, description, globalParams, "", timeout, tenantId);
    processDefinition.setExecutionType(executionType);
```
        List<ProcessTaskRelationLog> taskRelationList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
        result = updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, Lists.newArrayList());
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (StringUtils.isBlank(scheduleJson)) {
            return result;
        }
        Map<String, Object> scheduleResult = updateDagSchedule(loginUser, projectCode, code, scheduleJson);
        if (scheduleResult.get(Constants.STATUS) != Status.SUCCESS) {
            Status scheduleResultStatus = (Status) scheduleResult.get(Constants.STATUS);
            putMsg(result, scheduleResultStatus);
            throw new ServiceException(scheduleResultStatus);
        }
        return result;
    }

    private Map<String, Object> updateDagSchedule(User loginUser,
                                                  long projectCode,
                                                  long processDefinitionCode,
                                                  String scheduleJson) {
        Map<String, Object> result = new HashMap<>();
        Schedule schedule = JSONUtils.parseObject(scheduleJson, Schedule.class);
        if (schedule == null) {
            putMsg(result, Status.DATA_IS_NOT_VALID, scheduleJson);
            throw new ServiceException(Status.DATA_IS_NOT_VALID);
        }
        FailureStrategy failureStrategy = schedule.getFailureStrategy() == null ? FailureStrategy.CONTINUE : schedule.getFailureStrategy();
        WarningType warningType = schedule.getWarningType() == null ? WarningType.NONE : schedule.getWarningType();
        Priority processInstancePriority = schedule.getProcessInstancePriority() == null ? Priority.MEDIUM : schedule.getProcessInstancePriority();
        int warningGroupId = schedule.getWarningGroupId() == 0 ? 1 : schedule.getWarningGroupId();
        String workerGroup = schedule.getWorkerGroup() == null ? "default" : schedule.getWorkerGroup();
        long environmentCode = schedule.getEnvironmentCode() == null ? -1 : schedule.getEnvironmentCode();
        ScheduleParam param = new ScheduleParam();
        param.setStartTime(schedule.getStartTime());
        param.setEndTime(schedule.getEndTime());
        param.setCrontab(schedule.getCrontab());
        param.setTimezoneId(schedule.getTimezoneId());
        return schedulerService.updateScheduleByProcessDefinitionCode(loginUser, projectCode, processDefinitionCode, JSONUtils.toJsonString(param),
            warningType, warningGroupId, failureStrategy, processInstancePriority, workerGroup, environmentCode);
    }

    /**
     * release process definition and schedule
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @param releaseState releaseState
     * @return update result code
     */
    @Transactional(rollbackFor = RuntimeException.class)
    @Override
    public Map<String, Object> releaseWorkflowAndSchedule(User loginUser, long projectCode, long code, ReleaseState releaseState) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (null == releaseState) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(code);
        if (scheduleObj == null) {
            putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, "processDefinitionCode:" + code);
            return result;
        }
        switch (releaseState) {
            case ONLINE:
                List<ProcessTaskRelation> relationList = processService.findRelationByCode(projectCode, code);
                if (CollectionUtils.isEmpty(relationList)) {
                    putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
                    return result;
                }
                processDefinition.setReleaseState(releaseState);
                processDefinitionMapper.updateById(processDefinition);
                scheduleObj.setReleaseState(ReleaseState.ONLINE);
                scheduleMapper.updateById(scheduleObj);
                break;
            case OFFLINE:
                processDefinition.setReleaseState(releaseState);
                int updateProcess = processDefinitionMapper.updateById(processDefinition);
                if (updateProcess > 0) {
                    logger.info("set schedule offline, project code: {}, schedule id: {}, process definition code: {}",
                        projectCode, scheduleObj.getId(), code);
                    scheduleObj.setReleaseState(ReleaseState.OFFLINE);
                    int updateSchedule = scheduleMapper.updateById(scheduleObj);
                    if (updateSchedule == 0) {
                        putMsg(result, Status.OFFLINE_SCHEDULE_ERROR);
                        throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR);
                    }
                    schedulerService.deleteSchedule(project.getId(), scheduleObj.getId());
                }
                break;
            default:
                putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
                return result;
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }
}
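For reference, `updateDagSchedule` above deserializes its `scheduleJson` parameter into a `Schedule` bean via `JSONUtils.parseObject`, so a caller would pass a payload along the lines of the sketch below. This is a minimal illustration assembled from the getters the method actually reads; the concrete values (dates, crontab, timezone) are made-up assumptions, not taken from the project.

```java
// Hypothetical example payload for updateDagSchedule (values are illustrative).
// The field names mirror the Schedule getters used above: startTime, endTime,
// crontab, timezoneId, failureStrategy, warningType, processInstancePriority,
// warningGroupId, workerGroup and environmentCode.
String scheduleJson = "{"
    + "\"startTime\": \"2022-01-01 00:00:00\","
    + "\"endTime\": \"2022-12-31 00:00:00\","
    + "\"crontab\": \"0 0 1 * * ? *\","
    + "\"timezoneId\": \"Asia/Shanghai\","
    + "\"failureStrategy\": \"CONTINUE\","
    + "\"warningType\": \"NONE\","
    + "\"processInstancePriority\": \"MEDIUM\","
    + "\"warningGroupId\": 1,"
    + "\"workerGroup\": \"default\","
    + "\"environmentCode\": -1"
    + "}";
```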
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto-create workflow while importing SQL scripts with specific hints
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement.

### Description

Expand the existing import process in

![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png)

Auto-create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: create a `sql` task type task for each script and set its dependencies according to the hints.

### Use case

When a user imports SQL scripts with specific hints in the header, a SQL task is created for each script, and the hints are then parsed to set each task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints given in the scripts and set the task relations for them.

If a SQL script names an upstream task that does not exist, we should pop up a dialog and ask whether to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is below:

> source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing

![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png)

## SQL scripts example

Here is an example of such SQL scripts. Each script may carry two hints: `name` to set the SQL task name, and `upstream` to set the task's upstream tasks.

* `start.sql`: If both the `name` and `upstream` hints are provided, we use them to set the task name and upstream tasks; if `upstream` is set to `root`, the task is a root task of the workflow.

```sql
-- name: start_auto_dag
-- upstream: root
insert into table start_auto_dag
select 1;
```

* `child1.sql`: When a task has an upstream task, just set its name as the `upstream` value. The task relation is created after the auto-DAG parser is done.

```sql
-- name: branch_one
-- upstream: start_auto_dag
insert into table branch_one
select * from start_auto_dag;
```

* `branch_two.sql`: If the `name` hint is not provided, the SQL script's file name is used as the task name. In this case, `branch_two` becomes the task name, and `start_auto_dag` is set as the upstream task.

```sql
-- upstream: start_auto_dag
insert into table branch_two
select * from start_auto_dag;
```

* `end.sql`: If a task has two upstreams, list both task names separated by a specific delimiter; as an example we use `,` as the delimiter and set the tasks `branch_one` and `branch_two` as upstream.

```sql
-- name: end_auto_dag
-- upstream: branch_one, branch_two
insert into table end_auto_dag
select * from branch_one
union all
select * from branch_two;
```

* `independence.sql`: If the `upstream` hint is not set, `root` is used as the default, so this becomes an independent task in the workflow.

```sql
select 'I am the independence of this workflow'
```

After we submit and DolphinScheduler parses the scripts, we would get the workflow below:

```
                  -> branch_one ->
                /                  \
start_auto_dag ->                   -> end_auto_dag
                \                  /
                  -> branch_two ->

independence
```

### Related issues

_No response_

### Are you willing to submit a PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
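To make the hint convention above concrete, here is a minimal sketch of how the `name` and `upstream` hints could be extracted from a script header. `SqlHintParser` and its method are hypothetical names introduced only for illustration; this is not the implementation from the linked PR.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SqlHintParser {

    // Reads "-- name: xxx" and "-- upstream: a, b" comment hints from the top of a
    // SQL script. Missing hints fall back to the defaults described in the issue:
    // the file name for "name" and "root" for "upstream".
    public static Map<String, List<String>> parseHints(String script, String fileName) {
        String taskName = fileName;
        List<String> upstream = Arrays.asList("root");
        for (String line : script.split("\n")) {
            String trimmed = line.trim();
            if (!trimmed.startsWith("--")) {
                break; // hints only appear in the leading comment block
            }
            String hint = trimmed.substring(2).trim();
            if (hint.startsWith("name:")) {
                taskName = hint.substring("name:".length()).trim();
            } else if (hint.startsWith("upstream:")) {
                upstream = Arrays.asList(hint.substring("upstream:".length()).trim().split("\\s*,\\s*"));
            }
        }
        Map<String, List<String>> result = new HashMap<>();
        result.put(taskName, upstream);
        return result;
    }
}
```

For the `end.sql` example above this would yield `{end_auto_dag=[branch_one, branch_two]}`, which is enough to build the task relations once every script has been parsed.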
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.apache.dolphinscheduler.api.enums.Status.BATCH_COPY_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.BATCH_MOVE_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.CREATE_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_PROCESS_DEFINITION_VERSION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.IMPORT_PROCESS_DEFINE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_DETAIL_OF_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROCESS_DEFINITION_LIST;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROCESS_DEFINITION_VERSIONS_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.RELEASE_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_PROCESS_DEFINITION_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR;

import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProcessExecutionTypeEnum;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.User;

import org.apache.commons.lang.StringUtils;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.servlet.http.HttpServletResponse;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import springfox.documentation.annotations.ApiIgnore;

/**
 * process definition controller
 */
@Api(tags = "PROCESS_DEFINITION_TAG")
@RestController
@RequestMapping("projects/{projectCode}/process-definition")
public class ProcessDefinitionController extends BaseController {

    private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionController.class);

    @Autowired
    private ProcessDefinitionService processDefinitionService;

    /**
     * create process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name process definition name
     * @param description description
     * @param globalParams globalParams
     * @param locations locations for nodes
     * @param timeout timeout
     * @param tenantCode tenantCode
     * @param taskRelationJson relation json for nodes
     * @param taskDefinitionJson taskDefinitionJson
     * @return create result code
     */
    @ApiOperation(value = "createProcessDefinition", notes = "CREATE_PROCESS_DEFINITION_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"),
        @ApiImplicitParam(name = "locations", value = "PROCESS_DEFINITION_LOCATIONS", required = true, type = "String"),
        @ApiImplicitParam(name = "description", value = "PROCESS_DEFINITION_DESC", required = false, type = "String")
    })
    @PostMapping()
    @ResponseStatus(HttpStatus.CREATED)
    @ApiException(CREATE_PROCESS_DEFINITION_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result createProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                          @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                          @RequestParam(value = "name", required = true) String name,
                                          @RequestParam(value = "description", required = false) String description,
                                          @RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams,
                                          @RequestParam(value = "locations", required = false) String locations,
@RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout, @RequestParam(value = "tenantCode", required = true) String tenantCode, @RequestParam(value = "taskRelationJson", required = true) String taskRelationJson, @RequestParam(value = "taskDefinitionJson", required = true) String taskDefinitionJson, @RequestParam(value = "executionType", defaultValue = "PARALLEL") ProcessExecutionTypeEnum executionType) { Map<String, Object> result = processDefinitionService.createProcessDefinition(loginUser, projectCode, name, description, globalParams, locations, timeout, tenantCode, taskRelationJson, taskDefinitionJson,executionType); return returnDataList(result); } /** * copy process definition * * @param loginUser login user * @param projectCode project code * @param codes process definition codes * @param targetProjectCode target project code * @return copy result code */ @ApiOperation(value = "batchCopyByCodes", notes = "COPY_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODES", required = true, dataType = "String", example = "3,4"), @ApiImplicitParam(name = "targetProjectCode", value = "TARGET_PROJECT_CODE", required = true, dataType = "Long", example = "123") }) @PostMapping(value = "/batch-copy") @ResponseStatus(HttpStatus.OK) @ApiException(BATCH_COPY_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result copyProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "codes", required = true) String codes,
@RequestParam(value = "targetProjectCode", required = true) long targetProjectCode) { return returnDataList(processDefinitionService.batchCopyProcessDefinition(loginUser, projectCode, codes, targetProjectCode)); } /** * move process definition * * @param loginUser login user * @param projectCode project code * @param codes process definition codes * @param targetProjectCode target project code * @return move result code */ @ApiOperation(value = "batchMoveByCodes", notes = "MOVE_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODES", required = true, dataType = "String", example = "3,4"), @ApiImplicitParam(name = "targetProjectCode", value = "TARGET_PROJECT_CODE", required = true, dataType = "Long", example = "123") }) @PostMapping(value = "/batch-move") @ResponseStatus(HttpStatus.OK) @ApiException(BATCH_MOVE_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result moveProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "codes", required = true) String codes, @RequestParam(value = "targetProjectCode", required = true) long targetProjectCode) { return returnDataList(processDefinitionService.batchMoveProcessDefinition(loginUser, projectCode, codes, targetProjectCode)); } /** * verify process definition name unique *
     * @param loginUser login user
     * @param projectCode project code
     * @param name name
     * @return true if process definition name not exists, otherwise false
     */
    @ApiOperation(value = "verify-name", notes = "VERIFY_PROCESS_DEFINITION_NAME_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String")
    })
    @GetMapping(value = "/verify-name")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result verifyProcessDefinitionName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                              @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                              @RequestParam(value = "name", required = true) String name) {
        Map<String, Object> result = processDefinitionService.verifyProcessDefinitionName(loginUser, projectCode, name);
        return returnDataList(result);
    }

    /**
     * update process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name process definition name
     * @param code process definition code
     * @param description description
     * @param globalParams globalParams
     * @param locations locations for nodes
     * @param timeout timeout
     * @param tenantCode tenantCode
     * @param taskRelationJson relation json for nodes
     * @param taskDefinitionJson taskDefinitionJson
     * @return update result code
     */
    @ApiOperation(value = "update", notes = "UPDATE_PROCESS_DEFINITION_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"),
        @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789"),
        @ApiImplicitParam(name = "locations", value = "PROCESS_DEFINITION_LOCATIONS", required = true, type = "String"),
        @ApiImplicitParam(name = "description", value = "PROCESS_DEFINITION_DESC", required = false, type = "String"),
        @ApiImplicitParam(name = "releaseState", value = "RELEASE_PROCESS_DEFINITION_NOTES", required = false, dataType = "ReleaseState")
    })
    @PutMapping(value = "/{code}")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(UPDATE_PROCESS_DEFINITION_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result updateProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                          @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                          @RequestParam(value = "name", required = true) String name,
                                          @PathVariable(value = "code", required = true) long code,
                                          @RequestParam(value = "description", required = false) String description,
                                          @RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams,
                                          @RequestParam(value = "locations", required = false) String locations,
                                          @RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout,
                                          @RequestParam(value = "tenantCode", required = true) String tenantCode,
                                          @RequestParam(value = "taskRelationJson", required = true) String taskRelationJson,
                                          @RequestParam(value = "taskDefinitionJson", required = true) String taskDefinitionJson,
                                          @RequestParam(value = "executionType", defaultValue = "PARALLEL") ProcessExecutionTypeEnum executionType,
                                          @RequestParam(value = "releaseState", required = false, defaultValue = "OFFLINE") ReleaseState releaseState) {
    Map<String, Object> result = processDefinitionService.updateProcessDefinition(loginUser, projectCode, name, code, description, globalParams,
        locations, timeout, tenantCode, taskRelationJson, taskDefinitionJson, executionType);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return returnDataList(result);
    }
    if (releaseState == ReleaseState.ONLINE) {
        result = processDefinitionService.releaseProcessDefinition(loginUser, projectCode, code, releaseState);
    }
    return returnDataList(result);
}

/**
 * query process definition version paging list info
 *
 * @param loginUser login user info
 * @param projectCode project code
 * @param pageNo the process definition version list current page number
 * @param pageSize the process definition version list page size
 * @param code the process definition code
 * @return the process definition version list
 */
@ApiOperation(value = "queryVersions", notes = "QUERY_PROCESS_DEFINITION_VERSIONS_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"),
    @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10"),
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "1")
})
@GetMapping(value = "/{code}/versions")
@ResponseStatus(HttpStatus.OK)
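The hint convention proposed in issue 7,016 above is a feature request, not something this controller implements. As a rough illustration of how such `-- name:` / `-- upstream:` headers could be parsed, here is a minimal, self-contained sketch; every name in it (`SqlScriptHintParser`, `ParsedScript`, `parse`, `missingUpstreams`, the `fallbackName` parameter) is hypothetical and not part of the DolphinScheduler code base.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Minimal sketch of the hint parsing described in the issue above.
 * All names here are hypothetical, chosen only for illustration.
 */
public class SqlScriptHintParser {

    // Matches header lines such as "-- name: start_auto_dag" or "-- upstream: a, b"
    private static final Pattern HINT = Pattern.compile("^--\\s*(name|upstream)\\s*:\\s*(.+)$");

    /** Task name and upstream task names recovered from one sql script. */
    public static class ParsedScript {
        public final String name;
        public final List<String> upstreams;

        public ParsedScript(String name, List<String> upstreams) {
            this.name = name;
            this.upstreams = upstreams;
        }
    }

    /**
     * Parse the "-- name:" and "-- upstream:" hints from a sql script.
     * Falls back to the file name when the name hint is missing and to
     * "root" when the upstream hint is missing, as the issue proposes.
     */
    public static ParsedScript parse(String sqlScript, String fallbackName) {
        String name = fallbackName;
        List<String> upstreams = new ArrayList<>();
        for (String line : sqlScript.split("\n")) {
            Matcher m = HINT.matcher(line.trim());
            if (!m.matches()) {
                continue;
            }
            if ("name".equals(m.group(1))) {
                name = m.group(2).trim();
            } else {
                // The issue uses "," as the delimiter between upstream names
                for (String upstream : m.group(2).split(",")) {
                    upstreams.add(upstream.trim());
                }
            }
        }
        if (upstreams.isEmpty()) {
            upstreams.add("root");
        }
        return new ParsedScript(name, upstreams);
    }

    /**
     * Collect upstream names that match no parsed task: the case where the
     * issue suggests asking the user whether to ignore the dependency.
     */
    public static List<String> missingUpstreams(Map<String, ParsedScript> tasksByName) {
        List<String> missing = new ArrayList<>();
        for (ParsedScript script : tasksByName.values()) {
            for (String upstream : script.upstreams) {
                if (!"root".equals(upstream) && !tasksByName.containsKey(upstream)) {
                    missing.add(upstream);
                }
            }
        }
        return missing;
    }
}
```

With the five scripts from the issue's example, `parse` would yield tasks named start_auto_dag, branch_one, branch_two, end_auto_dag and independence, and `missingUpstreams` would return an empty list; an `upstream` value that matches no script would instead surface there, which is the point where the proposed confirmation dialog would be shown.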
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiException(QUERY_PROCESS_DEFINITION_VERSIONS_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryProcessDefinitionVersions(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                             @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                             @RequestParam(value = "pageNo") int pageNo,
                                             @RequestParam(value = "pageSize") int pageSize,
                                             @PathVariable(value = "code") long code) {
    Result result = checkPageParams(pageNo, pageSize);
    if (!result.checkResult()) {
        return result;
    }
    result = processDefinitionService.queryProcessDefinitionVersions(loginUser, projectCode, pageNo, pageSize, code);
    return result;
}

/**
 * switch certain process definition version
 *
 * @param loginUser login user info
 * @param projectCode project code
 * @param code the process definition code
 * @param version the version user want to switch
 * @return switch version result code
 */
@ApiOperation(value = "switchVersion", notes = "SWITCH_PROCESS_DEFINITION_VERSION_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "1"),
    @ApiImplicitParam(name = "version", value = "VERSION", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/{code}/versions/{version}")
@ResponseStatus(HttpStatus.OK)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiException(SWITCH_PROCESS_DEFINITION_VERSION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result switchProcessDefinitionVersion(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                             @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                             @PathVariable(value = "code") long code,
                                             @PathVariable(value = "version") int version) {
    Map<String, Object> result = processDefinitionService.switchProcessDefinitionVersion(loginUser, projectCode, code, version);
    return returnDataList(result);
}

/**
 * delete the certain process definition version by version and process definition code
 *
 * @param loginUser login user info
 * @param projectCode project code
 * @param code the process definition code
 * @param version the process definition version user want to delete
 * @return delete version result code
 */
@ApiOperation(value = "deleteVersion", notes = "DELETE_PROCESS_DEFINITION_VERSION_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "1"),
    @ApiImplicitParam(name = "version", value = "VERSION", required = true, dataType = "Int", example = "100")
})
@DeleteMapping(value = "/{code}/versions/{version}")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_PROCESS_DEFINITION_VERSION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result deleteProcessDefinitionVersion(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                             @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                             @PathVariable(value = "code") long code,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
                                             @PathVariable(value = "version") int version) {
    Map<String, Object> result = processDefinitionService.deleteProcessDefinitionVersion(loginUser, projectCode, code, version);
    return returnDataList(result);
}

/**
 * release process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @param releaseState release state
 * @return release result code
 */
@ApiOperation(value = "release", notes = "RELEASE_PROCESS_DEFINITION_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"),
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789"),
    @ApiImplicitParam(name = "releaseState", value = "PROCESS_DEFINITION_RELEASE", required = true, dataType = "ReleaseState"),
})
@PostMapping(value = "/{code}/release")
@ResponseStatus(HttpStatus.OK)
@ApiException(RELEASE_PROCESS_DEFINITION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result releaseProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                       @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                       @PathVariable(value = "code", required = true) long code,
                                       @RequestParam(value = "releaseState", required = true) ReleaseState releaseState) {
    Map<String, Object> result = processDefinitionService.releaseProcessDefinition(loginUser, projectCode, code, releaseState);
    return returnDataList(result);
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
/**
 * query detail of process definition by code
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @return process definition detail
 */
@ApiOperation(value = "queryProcessDefinitionByCode", notes = "QUERY_PROCESS_DEFINITION_BY_CODE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789")
})
@GetMapping(value = "/{code}")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DETAIL_OF_PROCESS_DEFINITION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryProcessDefinitionByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                           @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                           @PathVariable(value = "code", required = true) long code) {
    Map<String, Object> result = processDefinitionService.queryProcessDefinitionByCode(loginUser, projectCode, code);
    return returnDataList(result);
}

/**
 * query detail of process definition by name
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @return process definition detail
 */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiOperation(value = "queryProcessDefinitionByName", notes = "QUERY_PROCESS_DEFINITION_BY_NAME_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/query-by-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DETAIL_OF_PROCESS_DEFINITION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result<ProcessDefinition> queryProcessDefinitionByName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                                              @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                                              @RequestParam("name") String name) {
    Map<String, Object> result = processDefinitionService.queryProcessDefinitionByName(loginUser, projectCode, name);
    return returnDataList(result);
}

/**
 * query Process definition list
 *
 * @param loginUser login user
 * @param projectCode project code
 * @return process definition list
 */
@ApiOperation(value = "queryList", notes = "QUERY_PROCESS_DEFINITION_LIST_NOTES")
@GetMapping(value = "/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROCESS_DEFINITION_LIST)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryProcessDefinitionList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                         @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode) {
    Map<String, Object> result = processDefinitionService.queryProcessDefinitionList(loginUser, projectCode);
    return returnDataList(result);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
}

/**
 * query Process definition simple list
 *
 * @param loginUser login user
 * @param projectCode project code
 * @return process definition list
 */
@ApiOperation(value = "querySimpleList", notes = "QUERY_PROCESS_DEFINITION_SIMPLE_LIST_NOTES")
@GetMapping(value = "/simple-list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROCESS_DEFINITION_LIST)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryProcessDefinitionSimpleList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                               @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode) {
    Map<String, Object> result = processDefinitionService.queryProcessDefinitionSimpleList(loginUser, projectCode);
    return returnDataList(result);
}

/**
 * query process definition list paging
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param searchVal search value
 * @param pageNo page number
 * @param pageSize page size
 * @param userId user id
 * @return process definition page
 */
@ApiOperation(value = "queryListPaging", notes = "QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES")
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiImplicitParams({
    @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", required = false, type = "String"),
    @ApiImplicitParam(name = "userId", value = "USER_ID", required = false, dataType = "Int", example = "100"),
    @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"),
    @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10")
})
@GetMapping()
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryProcessDefinitionListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                               @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                               @RequestParam(value = "searchVal", required = false) String searchVal,
                                               @RequestParam(value = "userId", required = false, defaultValue = "0") Integer userId,
                                               @RequestParam("pageNo") Integer pageNo,
                                               @RequestParam("pageSize") Integer pageSize) {
    Result result = checkPageParams(pageNo, pageSize);
    if (!result.checkResult()) {
        return result;
    }
    searchVal = ParameterUtils.handleEscapes(searchVal);
    return processDefinitionService.queryProcessDefinitionListPaging(loginUser, projectCode, searchVal, userId, pageNo, pageSize);
}

/**
 * encapsulation tree view structure
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @param limit limit
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
 * @return tree view json data
 */
@ApiOperation(value = "viewTree", notes = "VIEW_TREE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "100"),
    @ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/{code}/view-tree")
@ResponseStatus(HttpStatus.OK)
@ApiException(ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result viewTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                       @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                       @PathVariable("code") long code,
                       @RequestParam("limit") Integer limit) {
    Map<String, Object> result = processDefinitionService.viewTree(projectCode, code, limit);
    return returnDataList(result);
}

/**
 * get tasks list by process definition code
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @return task list
 */
@ApiOperation(value = "getTasksByDefinitionCode", notes = "GET_TASK_LIST_BY_DEFINITION_CODE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "100")
})
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@GetMapping(value = "/{code}/tasks")
@ResponseStatus(HttpStatus.OK)
@ApiException(GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR)
public Result getNodeListByDefinitionCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                          @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                          @PathVariable("code") long code) {
    Map<String, Object> result = processDefinitionService.getTaskNodeListByDefinitionCode(loginUser, projectCode, code);
    return returnDataList(result);
}

/**
 * get tasks list map by process definition multiple code
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param codes process definition codes
 * @return node list data
 */
@ApiOperation(value = "getTaskListByDefinitionCodes", notes = "GET_TASK_LIST_BY_DEFINITION_CODE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODES", required = true, type = "String", example = "100,200,300")
})
@GetMapping(value = "/batch-query-tasks")
@ResponseStatus(HttpStatus.OK)
@ApiException(GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR)
public Result getNodeListMapByDefinitionCodes(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                              @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                              @RequestParam("codes") String codes) {
    Map<String, Object> result = processDefinitionService.getNodeListMapByDefinitionCodes(loginUser, projectCode, codes);
    return returnDataList(result);
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
/**
 * delete process definition by code
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @return delete result code
 */
@ApiOperation(value = "deleteByCode", notes = "DELETE_PROCESS_DEFINITION_BY_ID_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", dataType = "Int", example = "100")
})
@DeleteMapping(value = "/{code}")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_PROCESS_DEFINE_BY_CODE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result deleteProcessDefinitionByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                            @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                            @PathVariable("code") long code) {
    Map<String, Object> result = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, code);
    return returnDataList(result);
}

/**
 * batch delete process definition by codes
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param codes process definition code list
 * @return delete result code
 */
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiOperation(value = "batchDeleteByCodes", notes = "BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "String") }) @PostMapping(value = "/batch-delete") @ResponseStatus(HttpStatus.OK) @ApiException(BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result batchDeleteProcessDefinitionByCodes(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam("codes") String codes) { Map<String, Object> result = new HashMap<>(); List<String> deleteFailedCodeList = new ArrayList<>(); if (!StringUtils.isEmpty(codes)) { String[] processDefinitionCodeArray = codes.split(","); for (String strProcessDefinitionCode : processDefinitionCodeArray) { long code = Long.parseLong(strProcessDefinitionCode); try { Map<String, Object> deleteResult = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, code); if (!Status.SUCCESS.equals(deleteResult.get(Constants.STATUS))) { deleteFailedCodeList.add(strProcessDefinitionCode); logger.error((String) deleteResult.get(Constants.MSG)); } } catch (Exception e) { deleteFailedCodeList.add(strProcessDefinitionCode); } } } if (!deleteFailedCodeList.isEmpty()) { putMsg(result, BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR, String.join(",", deleteFailedCodeList));
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
    } else {
        putMsg(result, Status.SUCCESS);
    }
    return returnDataList(result);
}

/**
 * batch export process definition by codes
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param codes process definition codes
 * @param response response
 */
@ApiOperation(value = "batchExportByCodes", notes = "BATCH_EXPORT_PROCESS_DEFINITION_BY_CODES_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "String")
})
@PostMapping(value = "/batch-export")
@ResponseBody
@AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "response"})
public void batchExportProcessDefinitionByCodes(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                                @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                                @RequestParam("codes") String codes,
                                                HttpServletResponse response) {
    try {
        processDefinitionService.batchExportProcessDefinitionByCodes(loginUser, projectCode, codes, response);
    } catch (Exception e) {
        logger.error(Status.BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR.getMsg(), e);
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
/**
 * query all process definition by project code
 *
 * @param loginUser login user
 * @param projectCode project code
 * @return process definition list
 */
@ApiOperation(value = "queryAllByProjectCode", notes = "QUERY_PROCESS_DEFINITION_All_BY_PROJECT_CODE_NOTES")
@GetMapping(value = "/all")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_PROCESS_DEFINITION_LIST)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryAllProcessDefinitionByProjectCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                                     @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode) {
    Map<String, Object> result = processDefinitionService.queryAllProcessDefinitionByProjectCode(loginUser, projectCode);
    return returnDataList(result);
}

/**
 * import process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param file resource file
 * @return import result code
 */
@ApiOperation(value = "importProcessDefinition", notes = "IMPORT_PROCESS_DEFINITION_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/import")
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiException(IMPORT_PROCESS_DEFINE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "file"})
public Result importProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                      @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                      @RequestParam("file") MultipartFile file) {
    Map<String, Object> result = processDefinitionService.importProcessDefinition(loginUser, projectCode, file);
    return returnDataList(result);
}

/**
 * create empty process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param description description
 * @param globalParams globalParams
 * @param timeout timeout
 * @param tenantCode tenantCode
 * @param scheduleJson scheduleJson
 * @return process definition code
 */
@ApiOperation(value = "createEmptyProcessDefinition", notes = "CREATE_EMPTY_PROCESS_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"),
    @ApiImplicitParam(name = "projectCode", value = "PROJECT_CODE", required = true, dataType = "Long", example = "123456789"),
    @ApiImplicitParam(name = "description", value = "PROCESS_DEFINITION_DESC", required = false, type = "String")
})
@PostMapping(value = "/empty")
@ResponseStatus(HttpStatus.OK)
@ApiException(CREATE_PROCESS_DEFINITION_ERROR)
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result createEmptyProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                           @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                           @RequestParam(value = "name", required = true) String name,
                                           @RequestParam(value = "description", required = false) String description,
                                           @RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams,
                                           @RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout,
                                           @RequestParam(value = "tenantCode", required = true) String tenantCode,
                                           @RequestParam(value = "scheduleJson", required = false) String scheduleJson,
                                           @RequestParam(value = "executionType", defaultValue = "PARALLEL") ProcessExecutionTypeEnum executionType) {
    return returnDataList(processDefinitionService.createEmptyProcessDefinition(loginUser, projectCode, name, description, globalParams,
        timeout, tenantCode, scheduleJson, executionType));
}

/**
 * update process definition basic info
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param code process definition code
 * @param description description
 * @param globalParams globalParams
 * @param timeout timeout
 * @param tenantCode tenantCode
 * @param scheduleJson scheduleJson
 * @param executionType executionType
 * @param releaseState releaseState
 * @return update result code
 */
@ApiOperation(value = "updateBasicInfo", notes = "UPDATE_PROCESS_DEFINITION_BASIC_INFO_NOTES")
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
@ApiImplicitParams({
    @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"),
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789"),
    @ApiImplicitParam(name = "description", value = "PROCESS_DEFINITION_DESC", required = false, type = "String"),
    @ApiImplicitParam(name = "releaseState", value = "RELEASE_PROCESS_DEFINITION_NOTES", required = false, dataType = "ReleaseState")
})
@PutMapping(value = "/{code}/basic-info")
@ResponseStatus(HttpStatus.OK)
@ApiException(UPDATE_PROCESS_DEFINITION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result updateProcessDefinitionBasicInfo(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                               @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                               @RequestParam(value = "name", required = true) String name,
                                               @PathVariable(value = "code", required = true) long code,
                                               @RequestParam(value = "description", required = false) String description,
                                               @RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams,
                                               @RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout,
                                               @RequestParam(value = "tenantCode", required = true) String tenantCode,
                                               @RequestParam(value = "scheduleJson", required = false) String scheduleJson,
                                               @RequestParam(value = "executionType", defaultValue = "PARALLEL") ProcessExecutionTypeEnum executionType,
                                               @RequestParam(value = "releaseState", required = false, defaultValue = "OFFLINE") ReleaseState releaseState) {
    Map<String, Object> result = processDefinitionService.updateProcessDefinitionBasicInfo(loginUser, projectCode, name, code, description,
        globalParams, timeout, tenantCode, scheduleJson, executionType);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return returnDataList(result);
    }
    if (releaseState == ReleaseState.ONLINE) {
        result = processDefinitionService.releaseWorkflowAndSchedule(loginUser, projectCode, code, releaseState);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
    }
    return returnDataList(result);
}

/**
 * release process definition and schedule
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @param releaseState releaseState
 * @return update result code
 */
@ApiOperation(value = "releaseWorkflowAndSchedule", notes = "RELEASE_WORKFLOW_SCHEDULE_NOTES")
@ApiImplicitParams({
    @ApiImplicitParam(name = "projectCode", value = "PROCESS_DEFINITION_NAME", required = true, type = "Long"),
    @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789"),
    @ApiImplicitParam(name = "releaseState", value = "RELEASE_PROCESS_DEFINITION_NOTES", required = true, dataType = "ReleaseState")
})
@PostMapping(value = "/{code}/release-workflow")
@ResponseStatus(HttpStatus.OK)
@ApiException(RELEASE_PROCESS_DEFINITION_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result releaseWorkflowAndSchedule(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                         @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                         @PathVariable(value = "code", required = true) long code,
                                         @RequestParam(value = "releaseState", required = true, defaultValue = "OFFLINE") ReleaseState releaseState) {
    return returnDataList(processDefinitionService.releaseWorkflowAndSchedule(loginUser, projectCode, code, releaseState));
}
}
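As a usage note, an endpoint shaped like the `releaseWorkflowAndSchedule` handler above could be exercised with a plain HTTP client. The sketch below is illustrative only: the base path, host, port, codes, and `token` header value are assumptions, since the controller's class-level request mapping and authentication setup are not visible in this excerpt.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Illustrative client call for an endpoint shaped like the one above.
// The base path, host/port, codes and "token" header are assumptions:
// the controller's class-level request mapping is not visible here.
public class ReleaseWorkflowExample {
    public static void main(String[] args) throws Exception {
        long projectCode = 123456789L;            // placeholder project code
        long processDefinitionCode = 987654321L;  // placeholder definition code
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:12345/dolphinscheduler/projects/" + projectCode
                + "/process-definition/" + processDefinitionCode
                + "/release-workflow?releaseState=ONLINE"))
            .header("token", "your-session-token") // placeholder session token
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + ": " + response.body());
    }
}
```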
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.enums.ProcessExecutionTypeEnum;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.Map;

import javax.servlet.http.HttpServletResponse;

import org.springframework.web.multipart.MultipartFile;

/**
 * process definition service
 */
public interface ProcessDefinitionService {
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
/**
 * create process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param description description
 * @param globalParams global params
 * @param locations locations for nodes
 * @param timeout timeout
 * @param tenantCode tenantCode
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
 * @param taskRelationJson relation json for nodes
 * @param taskDefinitionJson taskDefinitionJson
 * @return create result code
 */
Map<String, Object> createProcessDefinition(User loginUser,
                                            long projectCode,
                                            String name,
                                            String description,
                                            String globalParams,
                                            String locations,
                                            int timeout,
                                            String tenantCode,
                                            String taskRelationJson,
                                            String taskDefinitionJson,
                                            ProcessExecutionTypeEnum executionType);

/**
 * query process definition list
 *
 * @param loginUser login user
 * @param projectCode project code
 * @return definition list
 */
Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode);

/**
 * query process definition simple list
 *
 * @param loginUser login user
 * @param projectCode project code
 * @return definition simple list
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
 */
Map<String, Object> queryProcessDefinitionSimpleList(User loginUser, long projectCode);

/**
 * query process definition list paging
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param searchVal search value
 * @param pageNo page number
 * @param pageSize page size
 * @param userId user id
 * @return process definition page
 */
Result queryProcessDefinitionListPaging(User loginUser,
                                        long projectCode,
                                        String searchVal,
                                        Integer userId,
                                        Integer pageNo,
                                        Integer pageSize);

/**
 * query detail of process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @return process definition detail
 */
Map<String, Object> queryProcessDefinitionByCode(User loginUser,
                                                 long projectCode,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
                                                 long code);

/**
 * query detail of process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @return process definition detail
 */
Map<String, Object> queryProcessDefinitionByName(User loginUser,
                                                 long projectCode,
                                                 String name);

/**
 * batch copy process definition
 *
 * @param loginUser loginUser
 * @param projectCode projectCode
 * @param codes processDefinitionCodes
 * @param targetProjectCode targetProjectCode
 */
Map<String, Object> batchCopyProcessDefinition(User loginUser,
                                               long projectCode,
                                               String codes,
                                               long targetProjectCode);

/**
 * batch move process definition
 *
 * @param loginUser loginUser
 * @param projectCode projectCode
 * @param codes processDefinitionCodes
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
 * @param targetProjectCode targetProjectCode
 */
Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode);

/**
 * update process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param code process definition code
 * @param description description
 * @param globalParams global params
 * @param locations locations for nodes
 * @param timeout timeout
 * @param tenantCode tenantCode
 * @param taskRelationJson relation json for nodes
 * @param taskDefinitionJson taskDefinitionJson
 * @return update result code
 */
Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code,
                                            String description, String globalParams, String locations, int timeout,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
                                            String tenantCode, String taskRelationJson, String taskDefinitionJson,
                                            ProcessExecutionTypeEnum executionType);

/**
 * verify process definition name unique
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name name
 * @return true if process definition name not exists, otherwise false
 */
Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name);

/**
 * delete process definition by code
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @return delete result code
 */
Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code);

/**
 * release process definition: online / offline
 *
 * @param loginUser login user
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
 * @param projectCode project code
 * @param code process definition code
 * @param releaseState release state
 * @return release result code
 */
Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState);

/**
 * batch export process definition by codes
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param codes process definition codes
 * @param response http servlet response
 */
void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response);

/**
 * import process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param file process metadata json file
 * @return import process
 */
Map<String, Object> importProcessDefinition(User loginUser,
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
                                            long projectCode,
                                            MultipartFile file);

/**
 * check the process task relation json
 *
 * @param processTaskRelationJson process task relation json
 * @return check result code
 */
Map<String, Object> checkProcessNodeList(String processTaskRelationJson);

/**
 * get task node details based on process definition
 *
 * @param loginUser loginUser
 * @param projectCode project code
 * @param code processDefinition code
 * @return task node list
 */
Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code);

/**
 * get task node details map based on process definition
 *
 * @param loginUser loginUser
 * @param projectCode project code
 * @param codes define code list
 * @return task node list
 */
Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode,
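Given the existing `importProcessDefinition(User, long, MultipartFile)` entry point shown in this chunk, the feature would most naturally land as a sibling service method. The declaration below is only a hedged sketch of such an extension point; the method name and shape are assumptions and the code actually merged in the linked PR may differ.

```java
/**
 * Hypothetical entry point for the SQL-script import described in the issue:
 * accept an uploaded archive of .sql files, parse the name/upstream hints,
 * and create one SQL-type task per script plus the task relations.
 */
Map<String, Object> importSqlProcessDefinition(User loginUser,
                                               long projectCode,
                                               MultipartFile file);
```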
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
                                                    String codes);

/**
 * query process definition all by project code
 *
 * @param projectCode project code
 * @return process definitions in the project
 */
Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode);

/**
 * Encapsulates the TreeView structure
 *
 * @param projectCode project code
 * @param code process definition code
 * @param limit limit
 * @return tree view json data
 */
Map<String, Object> viewTree(long projectCode, long code, Integer limit);

/**
 * switch the defined process definition version
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @param version the version user want to switch
 * @return switch process definition version result code
 */
Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version);
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
/**
 * query the pagination versions info by one certain process definition code
 *
 * @param loginUser login user info to check auth
 * @param projectCode project code
 * @param pageNo page number
 * @param pageSize page size
 * @param code process definition code
 * @return the pagination process definition versions info of the certain process definition
 */
Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code);

/**
 * delete one certain process definition by version number and process definition code
 *
 * @param loginUser login user info to check auth
 * @param projectCode project code
 * @param code process definition code
 * @param version version number
 * @return delete result code
 */
Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version);

/**
 * create empty process definition
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param description description
 * @param globalParams globalParams
 * @param timeout timeout
 * @param tenantCode tenantCode
 * @param scheduleJson scheduleJson
 * @return process definition code
 */
Map<String, Object> createEmptyProcessDefinition(User loginUser, long projectCode, String name, String description,
                                                 String globalParams, int timeout, String tenantCode,
                                                 String scheduleJson, ProcessExecutionTypeEnum executionType);

/**
 * update process definition basic info
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param name process definition name
 * @param code process definition code
 * @param description description
 * @param globalParams globalParams
 * @param timeout timeout
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
 * @param tenantCode tenantCode
 * @param scheduleJson scheduleJson
 * @param executionType executionType
 * @return update result code
 */
Map<String, Object> updateProcessDefinitionBasicInfo(User loginUser, long projectCode, String name, long code,
                                                     String description, String globalParams, int timeout,
                                                     String tenantCode, String scheduleJson,
                                                     ProcessExecutionTypeEnum executionType);

/**
 * release process definition and schedule
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param code process definition code
 * @param releaseState releaseState
 * @return update result code
 */
Map<String, Object> releaseWorkflowAndSchedule(User loginUser, long projectCode, long code, ReleaseState releaseState);
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_CODE;

import org.apache.dolphinscheduler.api.dto.DagDataSchedule;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
import org.apache.dolphinscheduler.api.dto.ScheduleParam;
import org.apache.dolphinscheduler.api.dto.treeview.Instance;
import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.FileUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ProcessExecutionTypeEnum;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils;
import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils.CodeGenerateException;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.DagData;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;

import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
7,016
[Feature][Auto DAG] Auto create workflow while import sql script with specific hint
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Expand the existing import process in ![image](https://user-images.githubusercontent.com/15820530/143562382-7d49c649-3c43-4280-856e-612c090717e3.png) Automatically create a workflow while importing SQL scripts with the specific hints `name` and `upstream`: it would create a `sql task type` task for each script and set the dependencies according to the hint settings. ### Use case When a user imports SQL scripts with specific hints in the header, it will create a SQL task for each SQL script, and then parse the hints to set each SQL task's upstream. After parsing all SQL scripts, we would create the same number of tasks as the number of SQL scripts (files). Besides that, we would connect the tasks according to the hints the SQL scripts give and set task relations for them. If a SQL script sets an upstream task that does not exist, we should pop up a dialog and ask whether or not to ignore the dependency. If the user chooses "yes" we should import but ignore the error; if the user chooses "no", we should terminate the import without creating any task or workflow. The flow chart is as below: > source file is in https://drive.google.com/file/d/1aV4nHH9_xf8z9WiyT6_-rDlWv2fpXzEj/view?usp=sharing ![DS-AutoDAG-flow-chat drawio](https://user-images.githubusercontent.com/15820530/143552961-267ee1cf-4c9b-498e-9e9f-9a0ea4de355b.png) ## SQL scripts example And here is an example of the SQL scripts. Each SQL script may carry two hints: `name` to specify the SQL task name, and `upstream` to set the upstream tasks for this task. * `start.sql`: If both the `name` and `upstream` hints are provided, we just use them to set the task name and upstream tasks; if `upstream` is set to root, it means the task is a root task in the workflow ```sql -- name: start_auto_dag -- upstream: root insert into table start_auto_dag select 1; ``` * `child1.sql`: When a task has an upstream task, you could just set its name as the `upstream` value, and the task relation will be created after the auto-DAG parser is done. ```sql -- name: branch_one -- upstream: start_auto_dag insert into table branch_one select * from start_auto_dag; ``` * `branch_two.sql`: If the `name` hint is not provided, we would use the SQL script filename as the task name. In this case, we use `branch_two` as the task name, and set `start_auto_dag` as the upstream task. ```sql -- upstream: start_auto_dag insert into table branch_two select * from start_auto_dag; ``` * `end.sql`: If a task has two upstreams, you could list the two task names with a specific delimiter; as an example we use `,` as the delimiter, and set tasks `branch_one` and `branch_two` as upstream ```sql -- name: end_auto_dag -- upstream: branch_one, branch_two insert into table end_auto_dag select * from branch_one union all select * from branch_two ``` * `independence.sql`: If the upstream hint is not set, we would use `root` as the default, so it would become an independent task in the workflow ```sql select 'I am the independence of this workflow' ``` After we submit it and DS parses it, we could get a workflow as below ``` -> branch_one -> / \ start_auto_dag -> -> end_auto_dag \ / -> branch_two -> independence ``` ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/7016
https://github.com/apache/dolphinscheduler/pull/7214
a826d37e54c45119a380ae65ebb47a749037efd3
4c2f77ee9cbd599edfb38e4bf82755f74e96e7c6
2021-11-26T09:59:33Z
java
2022-01-05T09:55:08Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
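The hint format described above is simple enough that a small parser covers it. The sketch below is illustrative only: the class and method names (`SqlScriptHint`, `parse`) are hypothetical and not part of the DolphinScheduler codebase; it assumes at most one `name` and one `upstream` hint per script, `,` as the upstream delimiter, and the filename (minus `.sql`) as the fallback task name, as the issue proposes.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical holder for the hints parsed from one SQL script.
public class SqlScriptHint {

    private static final Pattern NAME_HINT = Pattern.compile("^--\\s*name:\\s*(.+)$", Pattern.MULTILINE);
    private static final Pattern UPSTREAM_HINT = Pattern.compile("^--\\s*upstream:\\s*(.+)$", Pattern.MULTILINE);

    public final String taskName;
    public final List<String> upstreams;

    private SqlScriptHint(String taskName, List<String> upstreams) {
        this.taskName = taskName;
        this.upstreams = upstreams;
    }

    public static SqlScriptHint parse(String fileName, String script) {
        // "-- name: x" overrides the filename; otherwise fall back to the filename without ".sql"
        Matcher name = NAME_HINT.matcher(script);
        String taskName = name.find()
            ? name.group(1).trim()
            : fileName.replaceFirst("\\.sql$", "");
        // "-- upstream: a, b" is split on ","; a missing hint defaults to "root"
        Matcher upstream = UPSTREAM_HINT.matcher(script);
        List<String> upstreams = new ArrayList<>();
        if (upstream.find()) {
            for (String part : upstream.group(1).split(",")) {
                upstreams.add(part.trim());
            }
        } else {
            upstreams.add("root");
        }
        return new SqlScriptHint(taskName, upstreams);
    }

    public static void main(String[] args) {
        SqlScriptHint hint = SqlScriptHint.parse("end.sql",
            "-- name: end_auto_dag\n-- upstream: branch_one, branch_two\nselect 1;");
        // prints: end_auto_dag <- [branch_one, branch_two]
        System.out.println(hint.taskName + " <- " + hint.upstreams);
    }
}
```

The relevant parts of the updated service implementation follow.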
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.Lists;

/**
 * process definition service impl
 */
@Service
public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService {

    private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class);

    private static final String RELEASESTATE = "releaseState";

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private ProjectService projectService;

    @Autowired
    private UserMapper userMapper;

    @Autowired
    private ProcessDefinitionLogMapper processDefinitionLogMapper;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;
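    // --- Illustrative sketch, not part of the original class ---
    // For the Auto DAG import proposed in this issue, the upstream names parsed
    // from the SQL hints must become ProcessTaskRelationLog entries before
    // createProcessDefinition(...) can persist them. A minimal sketch, assuming
    // taskCodeByName maps every parsed task name to a generated task code and
    // "root" is encoded as preTaskCode == 0 (no real upstream):
    private static List<ProcessTaskRelationLog> buildRelationsSketch(Map<String, Long> taskCodeByName,
                                                                     Map<String, List<String>> upstreamsByName) {
        List<ProcessTaskRelationLog> relations = new ArrayList<>();
        for (Map.Entry<String, List<String>> entry : upstreamsByName.entrySet()) {
            long postTaskCode = taskCodeByName.get(entry.getKey());
            for (String upstream : entry.getValue()) {
                // "root" means the task has no upstream edge to a real task
                long preTaskCode = "root".equals(upstream) ? 0L : taskCodeByName.get(upstream);
                ProcessTaskRelationLog relation = new ProcessTaskRelationLog();
                relation.setPreTaskCode(preTaskCode);
                relation.setPostTaskCode(postTaskCode);
                relations.add(relation);
            }
        }
        return relations;
    }
    // --- end of sketch ---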
    @Autowired
    private ProcessInstanceService processInstanceService;

    @Autowired
    private TaskInstanceMapper taskInstanceMapper;

    @Autowired
    private ScheduleMapper scheduleMapper;

    @Autowired
    private ProcessService processService;

    @Autowired
    private ProcessTaskRelationMapper processTaskRelationMapper;

    @Autowired
    private ProcessTaskRelationLogMapper processTaskRelationLogMapper;

    @Autowired
    TaskDefinitionLogMapper taskDefinitionLogMapper;

    @Autowired
    private TaskDefinitionMapper taskDefinitionMapper;

    @Autowired
    private SchedulerService schedulerService;

    @Autowired
    private TenantMapper tenantMapper;

    /**
     * create process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name process definition name
     * @param description description
     * @param globalParams global params
     * @param locations locations for nodes
     * @param timeout timeout
     * @param tenantCode tenantCode
     * @param taskRelationJson relation json for nodes
     * @param taskDefinitionJson taskDefinitionJson
     * @return create result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> createProcessDefinition(User loginUser,
                                                       long projectCode,
                                                       String name,
                                                       String description,
                                                       String globalParams,
                                                       String locations,
                                                       int timeout,
                                                       String tenantCode,
                                                       String taskRelationJson,
                                                       String taskDefinitionJson,
                                                       ProcessExecutionTypeEnum executionType) {
        Project project = projectMapper.queryByCode(projectCode);
        // check whether the user is authorized for this project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        // the definition name must be unique within the project
        ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
        if (definition != null) {
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
            return result;
        }
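        // From here the method validates the task definitions and relations,
        // resolves the tenant, generates a unique definition code, and finally
        // delegates to createDagDefine to persist the DAG in one transaction.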
        // validate the raw JSON from the front end before anything is persisted
        List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
        Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson);
        if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) {
            return checkTaskDefinitions;
        }
        List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
        Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs);
        if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) {
            return checkRelationJson;
        }
        // resolve the tenant unless the default tenant is requested
        int tenantId = -1;
        if (!Constants.DEFAULT.equals(tenantCode)) {
            Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
            if (tenant == null) {
                putMsg(result, Status.TENANT_NOT_EXIST);
                return result;
            }
            tenantId = tenant.getId();
        }
        // generate the definition code up front so tasks and relations can reference it
        long processDefinitionCode;
        try {
            processDefinitionCode = CodeGenerateUtils.getInstance().genCode();
        } catch (CodeGenerateException e) {
            putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
            return result;
        }
        ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description,
            globalParams, locations, timeout, loginUser.getId(), tenantId);
        processDefinition.setExecutionType(executionType);
        return createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs);
    }

    private Map<String, Object> createDagDefine(User loginUser,
                                                List<ProcessTaskRelationLog> taskRelationList,
                                                ProcessDefinition processDefinition,
                                                List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<String, Object> result = new HashMap<>();
        int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs);
        if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) {
            logger.info("The task has not changed, so skip");
        }
        if (saveTaskResult == Constants.DEFINITION_FAILURE) {
            putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
            throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
        }
        int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
        if (insertVersion == 0) {
            putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
            throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR);
        }
        int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(),
            processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
        if (insertResult == Constants.EXIT_CODE_SUCCESS) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, processDefinition);
        } else {
            putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR);
            throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR);
        }
        return result;
    }

    private Map<String, Object> checkTaskDefinitionList(List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) {
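        // Reject an empty definition list, then validate each task's parameters;
        // any exception is mapped to REQUEST_PARAMS_NOT_VALID_ERROR instead of propagating.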
        Map<String, Object> result = new HashMap<>();
        try {
            if (taskDefinitionLogs.isEmpty()) {
                logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson);
                putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson);
                return result;
            }
            for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
                if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) {
                    logger.error("task definition {} parameter invalid", taskDefinitionLog.getName());
                    putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName());
                    return result;
                }
            }
            putMsg(result, Status.SUCCESS);
        } catch (Exception e) {
            result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
            result.put(Constants.MSG, e.getMessage());
        }
        return result;
    }

    private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<String, Object> result = new HashMap<>();
        try {
            if (taskRelationList == null || taskRelationList.isEmpty()) {
                logger.error("task relation list is null");
                putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson);
                return result;
            }
            List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream()
                .map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class))
                .collect(Collectors.toList());
            List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs);
            if (taskNodeList.size() != taskRelationList.size()) {
                Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet());
                Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet());
                Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes);
                if (CollectionUtils.isNotEmpty(codes)) {
                    logger.error("the task code is not exist");
                    putMsg(result, Status.TASK_DEFINE_NOT_EXIST, org.apache.commons.lang.StringUtils.join(codes, Constants.COMMA));
                    return result;
                }
            }
            if (graphHasCycle(taskNodeList)) {
                logger.error("process DAG has cycle");
                putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
                return result;
            }
            // every relation must point at a real post task; a zero code cannot
            for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
                if (processTaskRelationLog.getPostTaskCode() == 0) {
                    logger.error("the post_task_code or post_task_version can't be zero");
                    putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR);
                    return result;
                }
            }
            putMsg(result, Status.SUCCESS);
        } catch (Exception e) {
            result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
            result.put(Constants.MSG, e.getMessage());
        }
        return result;
    }

    /**
     * query process definition list
     *
     * @param loginUser login user
     * @param projectCode project code
     * @return definition list
     */
    @Override
    public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode);
        List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList());
        result.put(Constants.DATA_LIST, dagDataList);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process definition simple list
     *
     * @param loginUser login user
     * @param projectCode project code
     * @return definition simple list
     */
    @Override
    public Map<String, Object> queryProcessDefinitionSimpleList(User loginUser, long projectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode);
        ArrayNode arrayNode = JSONUtils.createArrayNode();
        for (ProcessDefinition processDefinition : processDefinitions) {
            ObjectNode processDefinitionNode = JSONUtils.createObjectNode();
            processDefinitionNode.put("id", processDefinition.getId());
            processDefinitionNode.put("code", processDefinition.getCode());
            processDefinitionNode.put("name", processDefinition.getName());
            processDefinitionNode.put("projectCode", processDefinition.getProjectCode());
            arrayNode.add(processDefinitionNode);
        }
        result.put(Constants.DATA_LIST, arrayNode);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process definition list paging
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param searchVal search value
     * @param userId user id
     * @param pageNo page number
     * @param pageSize page size
     * @return process definition page
     */
    @Override
    public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            putMsg(result, resultStatus);
            return result;
        }

        Page<ProcessDefinition> page = new Page<>(pageNo, pageSize);
        IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging(
            page, searchVal, userId, project.getCode(), isAdmin(loginUser));

        List<ProcessDefinition> records = processDefinitionIPage.getRecords();
        for (ProcessDefinition pd : records) {
            ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion());
            User user = userMapper.selectById(processDefinitionLog.getOperator());
            pd.setModifyBy(user.getUserName());
        }
        processDefinitionIPage.setRecords(records);

        PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
        pageInfo.setTotal((int) processDefinitionIPage.getTotal());
        pageInfo.setTotalList(processDefinitionIPage.getRecords());
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
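        // PageInfo now carries the total row count plus the current page of records,
        // with each definition's modifyBy resolved to the operator of its latest logged version.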
        return result;
    }

    /**
     * query detail of process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @return process definition detail
     */
    @Override
    public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }

        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        } else {
            Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId());
            if (tenant != null) {
                processDefinition.setTenantCode(tenant.getTenantCode());
            }
            DagData dagData = processService.genDagData(processDefinition);
            result.put(Constants.DATA_LIST, dagData);
            putMsg(result, Status.SUCCESS);
        }
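        // result now holds either PROCESS_DEFINE_NOT_EXIST or the full DagData
        // (definition plus task definitions and relations) for the requested code.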
        return result;
    }

    @Override
    public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name);
        } else {
            DagData dagData = processService.genDagData(processDefinition);
            result.put(Constants.DATA_LIST, dagData);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    /**
     * update process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name process definition name
     * @param code process definition code
     * @param description description
     * @param globalParams global params
     * @param locations locations for nodes
     * @param timeout timeout
     * @param tenantCode tenantCode
     * @param taskRelationJson relation json for nodes
     * @param taskDefinitionJson taskDefinitionJson
     * @return update result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> updateProcessDefinition(User loginUser,
                                                       long projectCode,
                                                       String name,
                                                       long code,
                                                       String description,
                                                       String globalParams,
                                                       String locations,
                                                       int timeout,
                                                       String tenantCode,
                                                       String taskRelationJson,
                                                       String taskDefinitionJson,
                                                       ProcessExecutionTypeEnum executionType) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
        Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson);
        if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) {
            return checkTaskDefinitions;
        }
        List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
        Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs);
        if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) {
            return checkRelationJson;
        }
        int tenantId = -1;
        if (!Constants.DEFAULT.equals(tenantCode)) {
            Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
            if (tenant == null) {
                putMsg(result, Status.TENANT_NOT_EXIST);
                return result;
            }
            tenantId = tenant.getId();
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
            return result;
        }
        if (!name.equals(processDefinition.getName())) {
            ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
            if (definition != null) {
                putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
                return result;
            }
        }
        ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class);
        processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId);
        processDefinition.setExecutionType(executionType);
        return updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs);
    }

    private Map<String, Object> updateDagDefine(User loginUser,
                                                List<ProcessTaskRelationLog> taskRelationList,
                                                ProcessDefinition processDefinition,
                                                ProcessDefinition processDefinitionDeepCopy,
                                                List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<String, Object> result = new HashMap<>();
        int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs);
        if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) {
            logger.info("The task has not changed, so skip");
        }
        if (saveTaskResult == Constants.DEFINITION_FAILURE) {
            putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR);
            throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR);
        }
        int insertVersion;
        if (processDefinition.equals(processDefinitionDeepCopy)) {
            insertVersion = processDefinitionDeepCopy.getVersion();
        } else {
            processDefinition.setUpdateTime(new Date());
            insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
        }
        if (insertVersion == 0) {
            putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
            throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR);
        }
        int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(),
                processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
        if (insertResult == Constants.EXIT_CODE_SUCCESS) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, processDefinition);
        } else {
            putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
            throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR);
        }
        return result;
    }

    /**
     * verify process definition name unique
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name name
     * @return true if process definition name not exists, otherwise false
     */
    @Override
    public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim());
        if (processDefinition == null) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim());
        }
        return result;
    }

    /**
     * delete process definition by code
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
            putMsg(result, Status.USER_NO_OPERATION_PERM);
            return result;
        }
        if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code);
            return result;
        }
        List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES);
        if (CollectionUtils.isNotEmpty(processInstances)) {
            putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size());
            return result;
        }
        Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(code);
        if (scheduleObj != null) {
            if (scheduleObj.getReleaseState() == ReleaseState.OFFLINE) {
                int delete = scheduleMapper.deleteById(scheduleObj.getId());
                if (delete == 0) {
                    putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
                    throw new ServiceException(Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
                }
            }
            if (scheduleObj.getReleaseState() == ReleaseState.ONLINE) {
                putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, scheduleObj.getId());
                return result;
            }
        }
        int delete = processDefinitionMapper.deleteById(processDefinition.getId());
        if (delete == 0) {
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
        }
        int deleteRelation = processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode());
        if (deleteRelation == 0) {
            logger.warn("The process definition has not relation, it will be delete successfully");
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * release process definition: online / offline
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @param releaseState release state
     * @return release result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) {
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (null == releaseState) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        switch (releaseState) {
            case ONLINE:
                List<ProcessTaskRelation> relationList = processService.findRelationByCode(projectCode, code);
                if (CollectionUtils.isEmpty(relationList)) {
                    putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
                    return result;
                }
                processDefinition.setReleaseState(releaseState);
                processDefinitionMapper.updateById(processDefinition);
                break;
            case OFFLINE:
                processDefinition.setReleaseState(releaseState);
                int updateProcess = processDefinitionMapper.updateById(processDefinition);
                Schedule schedule = scheduleMapper.queryByProcessDefinitionCode(code);
                if (updateProcess > 0 && schedule != null) {
                    logger.info("set schedule offline, project code: {}, schedule id: {}, process definition code: {}", projectCode, schedule.getId(), code);
                    schedule.setReleaseState(releaseState);
                    int updateSchedule = scheduleMapper.updateById(schedule);
                    if (updateSchedule == 0) {
                        putMsg(result, Status.OFFLINE_SCHEDULE_ERROR);
                        throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR);
                    }
                    schedulerService.deleteSchedule(project.getId(), schedule.getId());
                }
                break;
            default:
                putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
                return result;
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * batch export process definition by codes
     */
    @Override
    public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) {
        if (org.apache.commons.lang.StringUtils.isEmpty(codes)) {
            return;
        }
        Project project = projectMapper.queryByCode(projectCode);
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return;
        }
        Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
        List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
        if (CollectionUtils.isEmpty(processDefinitionList)) {
            return;
        }
        List<ProcessDefinition> processDefinitionListInProject = processDefinitionList.stream().filter(o -> projectCode == o.getProjectCode()).collect(Collectors.toList());
        List<DagDataSchedule> dagDataSchedules = processDefinitionListInProject.stream().map(this::exportProcessDagData).collect(Collectors.toList());
        if (CollectionUtils.isNotEmpty(dagDataSchedules)) {
            downloadProcessDefinitionFile(response, dagDataSchedules);
        }
    }

    /**
     * download the process definition file
     */
    private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) {
        response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
        BufferedOutputStream buff = null;
        ServletOutputStream out = null;
        try {
            out = response.getOutputStream();
            buff = new BufferedOutputStream(out);
            buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8));
            buff.flush();
            buff.close();
        } catch (IOException e) {
            logger.warn("export process fail", e);
        } finally {
            if (null != buff) {
                try {
                    buff.close();
                } catch (Exception e) {
                    logger.warn("export process buffer not close", e);
                }
            }
            if (null != out) {
                try {
                    out.close();
                } catch (Exception e) {
                    logger.warn("export process output stream not close", e);
                }
            }
        }
    }

    /**
     * get export process dag data
     *
     * @param processDefinition process definition
     * @return DagDataSchedule
     */
    public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) {
        Schedule scheduleObj = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode());
        DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition));
        if (scheduleObj != null) {
            scheduleObj.setReleaseState(ReleaseState.OFFLINE);
            dagDataSchedule.setSchedule(scheduleObj);
        }
        return dagDataSchedule;
    }

    /**
     * import process definition
/**
 * import process definition
 *
 * @param loginUser login user
 * @param projectCode project code
 * @param file process metadata json file
 * @return import process
 */
@Override
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) {
    Map<String, Object> result = new HashMap<>();
    String dagDataScheduleJson = FileUtils.file2String(file);
    List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class);
    if (CollectionUtils.isEmpty(dagDataScheduleList)) {
        putMsg(result, Status.DATA_IS_NULL, "fileContent");
        return result;
    }
    for (DagDataSchedule dagDataSchedule : dagDataScheduleList) {
        if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) {
            return result;
        }
    }
    return result;
}
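For reference, the JSON the method above reads back is the array written on export. A minimal, self-contained sketch of that shape with plain Jackson; the field names are inferred from the `DagDataSchedule` getters used in this class, and the values are placeholders, not a real export:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ImportPayloadShape {
    public static void main(String[] args) throws Exception {
        // One array element per workflow: process definition, tasks,
        // relations, and an optional schedule.
        String payload = "[{"
                + "\"processDefinition\": {\"name\": \"demo\", \"locations\": \"[]\"},"
                + "\"taskDefinitionList\": [],"
                + "\"processTaskRelationList\": [],"
                + "\"schedule\": null"
                + "}]";
        JsonNode root = new ObjectMapper().readTree(payload);
        System.out.println(root.get(0).get("processDefinition").get("name").asText()); // demo
    }
}
```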
/**
 * check and import
 */
private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) {
    if (!checkImportanceParams(dagDataSchedule, result)) {
        return false;
    }
    ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition();
    Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName());
    if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) {
        putMsg(result, Status.SUCCESS);
    } else {
        result.putAll(checkResult);
        return false;
    }
    String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1);
    processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp());
    processDefinition.setId(0);
    processDefinition.setProjectCode(projectCode);
    processDefinition.setUserId(loginUser.getId());
    try {
        processDefinition.setCode(CodeGenerateUtils.getInstance().genCode());
    } catch (CodeGenerateException e) {
        putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
        return false;
    }
    List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList();
    Map<Long, Long> taskCodeMap = new HashMap<>();
    Date now = new Date();
    List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>();
    for (TaskDefinition taskDefinition : taskDefinitionList) {
        TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition);
        taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp());
        taskDefinitionLog.setProjectCode(projectCode);
        taskDefinitionLog.setUserId(loginUser.getId());
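        // Editorial note: each imported task definition below gets a freshly
        // generated code; taskCodeMap records the old -> new pairs so that task
        // relations and saved node locations can be remapped further down.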
        taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
        taskDefinitionLog.setCreateTime(now);
        taskDefinitionLog.setUpdateTime(now);
        taskDefinitionLog.setOperator(loginUser.getId());
        taskDefinitionLog.setOperateTime(now);
        try {
            long code = CodeGenerateUtils.getInstance().genCode();
            taskCodeMap.put(taskDefinitionLog.getCode(), code);
            taskDefinitionLog.setCode(code);
        } catch (CodeGenerateException e) {
            logger.error("Task code get error, ", e);
            putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code");
            return false;
        }
        taskDefinitionLogList.add(taskDefinitionLog);
    }
    int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList);
    int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList);
    if ((logInsert & insert) == 0) {
        putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
        throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
    }
    List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList();
    List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>();
    for (ProcessTaskRelation processTaskRelation : taskRelationList) {
        ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation);
        if (taskCodeMap.containsKey(processTaskRelationLog.getPreTaskCode())) {
            processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode()));
        }
        if (taskCodeMap.containsKey(processTaskRelationLog.getPostTaskCode())) {
            processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode()));
        }
        processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST);
        processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST);
        taskRelationLogList.add(processTaskRelationLog);
    }
    if (StringUtils.isNotEmpty(processDefinition.getLocations()) && JSONUtils.checkJsonValid(processDefinition.getLocations())) {
        ArrayNode arrayNode = JSONUtils.parseArray(processDefinition.getLocations());
        ArrayNode newArrayNode = JSONUtils.createArrayNode();
        for (int i = 0; i < arrayNode.size(); i++) {
            ObjectNode newObjectNode = newArrayNode.addObject();
            JsonNode jsonNode = arrayNode.get(i);
            Long taskCode = taskCodeMap.get(jsonNode.get("taskCode").asLong());
            if (Objects.nonNull(taskCode)) {
                newObjectNode.put("taskCode", taskCode);
                newObjectNode.set("x", jsonNode.get("x"));
                newObjectNode.set("y", jsonNode.get("y"));
            }
        }
        processDefinition.setLocations(newArrayNode.toString());
    }
    Map<String, Object> createDagResult = createDagDefine(loginUser, taskRelationLogList, processDefinition, Lists.newArrayList());
    if (Status.SUCCESS.equals(createDagResult.get(Constants.STATUS))) {
        putMsg(createDagResult, Status.SUCCESS);
    } else {
        result.putAll(createDagResult);
        throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
    }
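    // Editorial note: the locations block above keeps each node's x/y
    // coordinates but swaps "taskCode" for the newly generated code via
    // taskCodeMap, so the imported DAG renders nodes at their original
    // positions. As written, addObject() runs before the null check, so
    // location entries without a mapped task code remain as empty objects.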
    Schedule schedule = dagDataSchedule.getSchedule();
    if (null != schedule) {
        ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode());
        schedule.setProcessDefinitionCode(newProcessDefinition.getCode());
        schedule.setUserId(loginUser.getId());
        schedule.setCreateTime(now);
        schedule.setUpdateTime(now);
        int scheduleInsert = scheduleMapper.insert(schedule);
        if (0 == scheduleInsert) {
            putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
            throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
        }
    }
    return true;
}

/**
 * check importance params
 */
private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) {
    if (dagDataSchedule.getProcessDefinition() == null) {
        putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition");
        return false;
    }
    if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) {
        putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList");
        return false;
    }
    if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) {
        putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList");
        return false;
    }
    return true;
}

private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) {
    ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName);
    if (processDefinition != null) {
        if (num > 1) {
            String str = processDefinitionName.substring(0, processDefinitionName.length() - 3);
            processDefinitionName = str + "(" + num + ")";
        } else {
            processDefinitionName = processDefinition.getName() + "(" + num + ")";
        }
    } else {
        return processDefinitionName;
    }
    return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1);
}
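// Editorial note: name collisions on import resolve as
//   "my_flow" -> "my_flow(1)" -> "my_flow(2)" -> ...
// The substring(0, length() - 3) step assumes the existing "(n)" suffix is
// exactly three characters, i.e. a single-digit counter.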
/**
 * check the process task relation json
 *
 * @param processTaskRelationJson process task relation json
 * @return check result code
 */
@Override
public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) {
    Map<String, Object> result = new HashMap<>();
    try {
        if (processTaskRelationJson == null) {
            logger.error("process data is null");
            putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson);
            return result;
        }
        List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class);
        List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList());
        if (CollectionUtils.isEmpty(taskNodes)) {
            logger.error("process node info is empty");
            putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
            return result;
        }
        if (graphHasCycle(taskNodes)) {
            logger.error("process DAG has cycle");
            putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
            return result;
        }
        for (TaskNode taskNode : taskNodes) {
            if (!CheckUtils.checkTaskNodeParameters(taskNode)) {
                logger.error("task node {} parameter invalid", taskNode.getName());
                putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName());
                return result;
            }
            CheckUtils.checkOtherParams(taskNode.getExtras());
        }
        putMsg(result, Status.SUCCESS);
    } catch (Exception e) {
        result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
        result.put(Constants.MSG, e.getMessage());
    }
    return result;
}
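The method above relies on `graphHasCycle(taskNodes)`, which is not shown in this excerpt. A self-contained sketch of the usual three-color DFS such a check might perform; the real helper's signature and internals may differ:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CycleCheck {

    /** Returns true if the downstream adjacency map contains a cycle. */
    public static boolean hasCycle(Map<String, List<String>> downstream) {
        // 0/absent = unvisited, 1 = on the current DFS path, 2 = fully processed
        Map<String, Integer> state = new HashMap<>();
        for (String node : downstream.keySet()) {
            if (dfs(node, downstream, state)) {
                return true;
            }
        }
        return false;
    }

    private static boolean dfs(String node, Map<String, List<String>> downstream,
                               Map<String, Integer> state) {
        Integer s = state.get(node);
        if (s != null && s == 1) {
            return true;  // back edge: node is already on the current path
        }
        if (s != null && s == 2) {
            return false; // already cleared
        }
        state.put(node, 1);
        for (String next : downstream.getOrDefault(node, List.of())) {
            if (dfs(next, downstream, state)) {
                return true;
            }
        }
        state.put(node, 2);
        return false;
    }

    public static void main(String[] args) {
        Map<String, List<String>> dag = Map.of(
                "start", List.of("branch_one", "branch_two"),
                "branch_one", List.of("end"),
                "branch_two", List.of("end"));
        System.out.println(hasCycle(dag)); // false
    }
}
```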
/**
 * get task node details based on process definition
 *
 * @param loginUser loginUser
 * @param projectCode project code
 * @param code process definition code
 * @return task node list
 */
@Override
public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) {
    Project project = projectMapper.queryByCode(projectCode);
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
    if (processDefinition == null || projectCode != processDefinition.getProjectCode()) {
        logger.info("process define not exists");
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        return result;
    }
    DagData dagData = processService.genDagData(processDefinition);
    result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList());
    putMsg(result, Status.SUCCESS);
    return result;
}

/**
 * get task node details map based on process definition